From 6dd304f2936d68c7f013b074d247d69b7f399e4c Mon Sep 17 00:00:00 2001 From: Yael Date: Wed, 29 Oct 2025 10:45:02 +0200 Subject: [PATCH 1/4] feat: add Tekton EventListener for MLOps benchmark testing workflows Implement complete EventListener infrastructure to trigger MLOps batch analysis via webhook, including pipeline tasks for API calls and status polling, comprehensive documentation, and Makefile integration for automated deployment. --- deploy/Makefile | 75 ++- deploy/tekton/eventlistener/README.md | 433 ++++++++++++++++++ .../eventlistener/benchmark-config.yaml | 26 ++ .../eventlistener/benchmark-pipeline.yaml | 183 ++++++++ .../eventlistener/call-orchestrator-api.yaml | 185 ++++++++ .../tekton/eventlistener/eventlistener.yaml | 38 ++ .../tekton/eventlistener/kustomization.yaml | 32 ++ .../eventlistener/poll-batch-status.yaml | 210 +++++++++ .../tekton/eventlistener/triggerbinding.yaml | 45 ++ .../tekton/eventlistener/triggertemplate.yaml | 90 ++++ 10 files changed, 1316 insertions(+), 1 deletion(-) create mode 100644 deploy/tekton/eventlistener/README.md create mode 100644 deploy/tekton/eventlistener/benchmark-config.yaml create mode 100644 deploy/tekton/eventlistener/benchmark-pipeline.yaml create mode 100644 deploy/tekton/eventlistener/call-orchestrator-api.yaml create mode 100644 deploy/tekton/eventlistener/eventlistener.yaml create mode 100644 deploy/tekton/eventlistener/kustomization.yaml create mode 100644 deploy/tekton/eventlistener/poll-batch-status.yaml create mode 100644 deploy/tekton/eventlistener/triggerbinding.yaml create mode 100644 deploy/tekton/eventlistener/triggertemplate.yaml diff --git a/deploy/Makefile b/deploy/Makefile index 2421d5be..ae562b45 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -56,6 +56,9 @@ S3_INPUT_BUCKET_NAME ?= test GITHUB_REPO_URL ?= https://github.com/RHEcosystemAppEng/sast-ai-workflow.git ARGOCD_NAMESPACE ?= sast-ai +# EventListener Configuration +ORCHESTRATOR_API_URL ?= + # Secret configuration (loaded from 
.env file) GITLAB_TOKEN ?= "" LLM_API_KEY ?= "" @@ -294,7 +297,7 @@ run: @echo " Container Image: $(CONTAINER_IMAGE)" @echo " ๐Ÿ”„ Removing old pipeline runs..." @$(CO) delete pipelinerun sast-ai-workflow-pipelinerun \ - -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 + -n $(NAMESPACE) --ignore-not-found # Create PipelineRun with current parameters @sed \ -e 's|PROJECT_NAME_PLACEHOLDER|$(PROJECT_NAME)|g' \ @@ -362,6 +365,73 @@ argocd-clean: $(CO) patch application sast-ai-tekton-pipeline-syncer-prod -n $(NAMESPACE) -p '{"metadata":{"finalizers":null}}' --type=merge > /dev/null 2>&1 || true @echo " โœ“ ArgoCD Applications removed" +eventlistener: + @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" + @echo "๐ŸŽฏ SAST AI Workflow - EventListener for MLOps Benchmarking" + @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" + @echo " Context: $(CONTEXT)" + @echo " Namespace: $(NAMESPACE)" + @echo "" + @echo "๐Ÿ“‹ Validating EventListener configuration..." 
+ @if [ -z "$(ORCHESTRATOR_API_URL)" ]; then \ + echo " โŒ ORCHESTRATOR_API_URL is required"; \ + echo ""; \ + echo "Usage:"; \ + echo " make eventlistener ORCHESTRATOR_API_URL= NAMESPACE="; \ + echo ""; \ + echo "Example:"; \ + echo " make eventlistener \\"; \ + echo " ORCHESTRATOR_API_URL=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 \\"; \ + echo " NAMESPACE=sast-ai"; \ + echo ""; \ + echo "Tip: Find your orchestrator URL with:"; \ + echo " oc get svc -l app=sast-ai-orchestrator"; \ + echo ""; \ + exit 1; \ + fi + @echo " โœ“ Configuration validated" + @echo " โ€ข Orchestrator URL: $(ORCHESTRATOR_API_URL)" + @echo "" + @echo "๐Ÿ”ง Generating benchmark-config.yaml..." + @cat > tekton/eventlistener/benchmark-config.yaml < /dev/null 2>&1 || true + @echo " โœ“ EventListener resources removed" + clean: @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo "๐Ÿงน SAST AI Workflow - Cleanup" @@ -384,6 +454,9 @@ clean: @if [ "$(ENV)" = "prod" ]; then \ $(CO) delete -k tekton/overlays/prod -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \ echo " โœ“ Production Tekton resources removed (kustomize overlay)"; \ + elif [ "$(ENV)" = "mlop" ]; then \ + $(CO) delete -k tekton/overlays/mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \ + echo " โœ“ MLOp Tekton resources removed (kustomize overlay)"; \ else \ $(CO) delete -k tekton/base -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \ echo " โœ“ Base Tekton resources removed (kustomize base)"; \ diff --git a/deploy/tekton/eventlistener/README.md b/deploy/tekton/eventlistener/README.md new file mode 100644 index 00000000..6b571c05 --- /dev/null +++ b/deploy/tekton/eventlistener/README.md @@ -0,0 +1,433 @@ +# Tekton EventListener for MLOps Benchmarking + +This directory contains 
a Tekton EventListener implementation that triggers the sast-ai-orchestrator MLOps batch API via webhook. This enables automated MLOps performance testing and benchmarking with DVC data versioning and S3 integration. + +## ๐ŸŽฏ Purpose + +Enable MLOps benchmark testing for batch SAST analysis jobs: +- โœ… Webhook-based triggering (curl/HTTP POST) +- โœ… Integration with sast-ai-orchestrator MLOps API (`/api/v1/mlops-batches`) +- โœ… DVC data versioning support +- โœ… S3 object storage integration +- โœ… Container image version testing +- โœ… Separation from production workflows +- โœ… Fork-friendly configuration + +## ๐Ÿ“ Directory Contents + +``` +eventlistener/ +โ”œโ”€โ”€ README.md # This file +โ”œโ”€โ”€ kustomization.yaml # Kustomize configuration +โ”œโ”€โ”€ benchmark-config.yaml # ConfigMap (generated by make eventlistener) +โ”œโ”€โ”€ benchmark-config.yaml.example # Template (optional reference) +โ”œโ”€โ”€ call-orchestrator-api.yaml # Task that calls orchestrator MLOps API +โ”œโ”€โ”€ poll-batch-status.yaml # Task that monitors batch completion +โ”œโ”€โ”€ benchmark-pipeline.yaml # MLOps benchmark pipeline +โ”œโ”€โ”€ eventlistener.yaml # EventListener + Service +โ”œโ”€โ”€ triggerbinding.yaml # Extracts webhook parameters (including MLOps params) +โ”œโ”€โ”€ triggertemplate.yaml # Generates PipelineRuns +โ””โ”€โ”€ test-eventlistener.sh # Helper script for testing +``` + +**Note:** `benchmark-config.yaml` is automatically generated when you run `make eventlistener` with the required parameters. 
+ +## ๐Ÿ“‹ Prerequisites + +- OpenShift/Kubernetes cluster with Tekton Pipelines installed +- `oc` or `kubectl` CLI tool +- `curl` for sending test requests +- (Optional) `tkn` CLI for easier pipeline management +- (Optional) `jq` for JSON parsing + +Check Tekton installation: +```bash +oc get pods -n openshift-pipelines +# or +kubectl get pods -n tekton-pipelines +``` + +## ๐Ÿš€ Quick Start + +### Step 1: Deploy MLOps Pipeline + +First, ensure you have the MLOps pipeline deployed: + +```bash +cd deploy +make tasks ENV=mlop NAMESPACE=your-namespace +``` + +### Step 2: Find Your Orchestrator URL + +```bash +# Find orchestrator service +oc get svc -l app=sast-ai-orchestrator -n your-namespace + +# Typical format: +# http://..svc.cluster.local: +# Example: http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 +``` + +### Step 3: Deploy EventListener + +Deploy the EventListener with required parameters: + +```bash +cd deploy +make eventlistener \ + ORCHESTRATOR_API_URL=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 \ + NAMESPACE=your-namespace +``` + +**Required Parameters:** +- `ORCHESTRATOR_API_URL` - Orchestrator service URL (cluster-internal) +- `NAMESPACE` - Target namespace + +**What happens:** +- โœ… Validates required parameters +- โœ… Generates `benchmark-config.yaml` with orchestrator URL and API endpoint +- โœ… Deploys all EventListener resources via Kustomize +- โœ… Shows verification and testing commands + +**Note:** The Google Sheet URL is provided via the webhook payload when triggering the EventListener, not during deployment. + +**Note:** The EventListener always calls `/api/v1/mlops-batches` endpoint (hardcoded for MLOps benchmarking). + +Verify deployment: +```bash +oc get eventlistener,task,pipeline,cm -l app.kubernetes.io/component=benchmark-mlop -n your-namespace +``` + +### Step 4: Test the EventListener + +**Option A: Manual testing** + +1. 
Port-forward to the EventListener service (for testing from outside the cluster): +```bash +oc port-forward svc/el-benchmark-mlop-listener 8080:8080 -n your-namespace +``` + +**Note:** Port-forwarding is **only needed for external testing** (e.g., from your local machine). The EventListener service is already accessible within the cluster at: +``` +http://el-benchmark-mlop-listener..svc.cluster.local:8080 +``` + +2. In another terminal, send a test request from your local machine: +```bash +curl -X POST http://localhost:8080 \ + -H 'Content-Type: application/json' \ + -d '{ + "batch_sheet_url": "https://docs.google.com/spreadsheets/d/YOUR_TEST_SHEET/edit", + "submitted_by": "manual-test", + "dvc_repo_url": "https://gitlab.com/your-org/dvc-repo.git", + "dvc_data_version": "v1.0.0", + "s3_endpoint_url": "https://s3.amazonaws.com", + "s3_input_bucket_name": "mlops-test-data" + }' +``` + +**Optional:** Test with custom container image: +```bash +curl -X POST http://localhost:8080 \ + -H 'Content-Type: application/json' \ + -d '{ + "batch_sheet_url": "https://docs.google.com/spreadsheets/d/YOUR_TEST_SHEET/edit", + "submitted_by": "version-test", + "dvc_repo_url": "https://gitlab.com/your-org/dvc-repo.git", + "dvc_data_version": "v1.0.0", + "s3_endpoint_url": "https://s3.amazonaws.com", + "s3_input_bucket_name": "mlops-test-data", + "image_version": "v2.1.0" + }' +``` + +3. Watch the PipelineRun: +```bash +# With tkn CLI +tkn pipelinerun logs -L -f + +# With kubectl/oc +oc get pipelinerun -l app.kubernetes.io/component=benchmark-mlop +oc logs -l tekton.dev/pipelineTask=call-orchestrator-api -f +``` + +## ๐Ÿ“Š Expected Results + +### Successful Test + +When everything works correctly, you should see: + +1. **EventListener Response** (HTTP 201): +```json +{ + "eventListener": "benchmark-mlop-listener", + "namespace": "your-namespace", + "eventID": "abc123..." +} +``` + +2. 
**PipelineRun Created**: +```bash +$ oc get pipelinerun -l app.kubernetes.io/component=benchmark-mlop +NAME SUCCEEDED REASON STARTTIME COMPLETIONTIME +benchmark-mlop-pipeline-abc123 True Succeeded 5m 2m +``` + +3. **Task Logs Show API Call**: +``` +========================================= +Calling Orchestrator MLOps Batch API +========================================= +Configuration: + Orchestrator URL: http://sast-ai-orchestrator... + API Endpoint: /api/v1/mlops-batches (MLOps benchmarking) + Batch Sheet URL: https://docs.google.com/... + DVC Repo: https://gitlab.com/... + S3 Bucket: mlops-test-data + ... +โœ“ API call successful! +Batch ID: batch-12345 + +Polling batch status... +โœ“ Batch completed successfully! +``` + +### Troubleshooting + +#### EventListener Pod Not Running + +```bash +# Check pod status +oc get pods -l eventlistener=benchmark-mlop-listener + +# Check pod logs +oc logs -l eventlistener=benchmark-mlop-listener +``` + +**Common issues:** +- Service account `pipeline` doesn't exist (create with Tekton operator) +- RBAC permissions missing + +#### API Call Fails + +Check task logs for detailed error: +```bash +oc logs -l tekton.dev/pipelineTask=call-orchestrator-api --tail=100 +``` + +**Common issues:** +- Orchestrator URL incorrect in ConfigMap +- Orchestrator service not running: `oc get pods -l app=sast-ai-orchestrator` +- Network policy blocking connections +- Google Sheet URL not accessible by orchestrator + +#### Verify ConfigMap + +```bash +# View current configuration +oc get configmap benchmark-config -o yaml -n your-namespace + +# Update if needed - regenerate with new parameters +cd deploy +make eventlistener \ + ORCHESTRATOR_API_URL= \ + NAMESPACE=your-namespace +``` + +## ๐Ÿ”ง Configuration Reference + +### Webhook Payload Format + +Send JSON payload with these fields: + +```json +{ + "batch_sheet_url": "https://docs.google.com/spreadsheets/d/SHEET_ID/edit", + "submitted_by": "trigger-source", + "dvc_repo_url": 
"https://gitlab.com/org/dvc-repo.git", + "dvc_data_version": "v1.2.3", + "s3_endpoint_url": "https://s3.amazonaws.com", + "s3_input_bucket_name": "mlops-data", + "image_version": "v2.0.0" +} +``` + +**Required Fields:** +- `batch_sheet_url` - Google Sheet with package list +- `dvc_repo_url` - DVC repository URL +- `dvc_data_version` - DVC data version tag +- `s3_endpoint_url` - S3 endpoint URL +- `s3_input_bucket_name` - S3 bucket name + +**Optional Fields:** +- `submitted_by` - Defaults to "eventlistener-webhook" +- `image_version` - Override workflow version for testing (e.g., "v2.1.0", "sha-abc123") + +### ConfigMap Keys + +The `benchmark-config` ConfigMap is automatically generated by `make eventlistener`: + +| Key | Description | Example | +|-----|-------------|---------| +| `orchestrator-api-url` | Base URL of orchestrator service | `http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080` | +| `api-batch-endpoint` | API endpoint path for MLOps batches | `/api/v1/mlops-batches` | + +**Note:** The `api-batch-endpoint` is automatically set to `/api/v1/mlops-batches` for MLOps benchmarking. + +**To regenerate:** Simply run `make eventlistener` again with updated parameters. 
+ +### Pipeline Parameters + +| Parameter | Type | Required | Default | Description | +|-----------|------|----------|---------|-------------| +| `batch-sheet-url` | string | Yes | - | Google Sheet with package list | +| `submitted-by` | string | No | `eventlistener-webhook` | Trigger source identifier | +| `dvc-repo-url` | string | Yes | - | DVC repository URL for data versioning | +| `dvc-data-version` | string | Yes | - | DVC data version tag | +| `s3-endpoint-url` | string | Yes | - | S3 endpoint URL | +| `s3-input-bucket-name` | string | Yes | - | S3 bucket name for input data | +| `image-version` | string | No | (default from pipeline) | Workflow image version for testing (tag only, e.g., "v2.1.0") | + +## ๐ŸŽ“ Understanding the Architecture + +### Flow Diagram + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ curl โ”‚ POST JSON payload +โ”‚ webhook โ”‚โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ EventListener โ”‚ + โ”‚ (benchmark-mlop) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Creates + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ PipelineRun โ”‚ + โ”‚ (auto-generated name)โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Executes + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Pipeline โ”‚ + โ”‚ (benchmark-mlop) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Runs Tasks + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Task 1 โ”‚ + โ”‚ (call-orchestrator) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Then + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Task 2 โ”‚ + โ”‚ (poll-batch-status) โ”‚ + 
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Reads Config + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ ConfigMap โ”‚ + โ”‚ (benchmark-config) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ Uses URL + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Orchestrator API โ”‚ + โ”‚ POST /api/v1/ โ”‚ + โ”‚ mlops-batches โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +### Component Responsibilities + +1. **EventListener**: Accepts webhook (exposed as Kubernetes Service), validates request, triggers pipeline + - Service name: `el-benchmark-mlop-listener` + - Internal cluster access: `http://el-benchmark-mlop-listener..svc.cluster.local:8080` + - External testing: Use `oc port-forward` (see testing section) +2. **TriggerBinding**: Extracts parameters from webhook JSON payload (including MLOps params) +3. **TriggerTemplate**: Generates PipelineRun with extracted parameters +4. **Pipeline**: Orchestrates task execution, monitors completion, handles results +5. **Task 1 (call-orchestrator-api)**: Calls orchestrator MLOps API with DVC/S3 params +6. **Task 2 (poll-batch-status)**: Monitors batch completion until done or timeout +7. 
**ConfigMap**: Stores environment-specific configuration (orchestrator URL, API endpoint) + +### Production Deployment + +Deploy to dedicated namespace: + +```bash +# Create namespace +oc new-project sast-ai-benchmark + +# Find orchestrator URL +ORCH_URL=$(oc get svc -l app=sast-ai-orchestrator -n sast-ai-benchmark -o jsonpath='{.items[0].metadata.name}') +ORCH_PORT=$(oc get svc -l app=sast-ai-orchestrator -n sast-ai-benchmark -o jsonpath='{.items[0].spec.ports[0].port}') + +# Deploy MLOps pipeline overlay +cd deploy +make tasks ENV=mlop NAMESPACE=sast-ai-benchmark + +# Deploy EventListener +make eventlistener \ + ORCHESTRATOR_API_URL=http://${ORCH_URL}.sast-ai-benchmark.svc.cluster.local:${ORCH_PORT} \ + NAMESPACE=sast-ai-benchmark +``` + +This creates both: +- The `mlop-sast-ai-workflow-pipeline` that the orchestrator will trigger +- The EventListener webhook endpoint for triggering benchmarks + +**Note:** The Google Sheet URL is provided when triggering the EventListener via webhook, not during deployment. + +## ๐Ÿงน Cleanup + +To remove all MLOps benchmark resources: + +```bash +# From deploy directory - Recommended +cd deploy +make eventlistener-clean NAMESPACE=your-namespace + +# Or manual cleanup +oc delete -k deploy/tekton/eventlistener/ -n your-namespace + +# Or individually +oc delete eventlistener benchmark-mlop-listener -n your-namespace +oc delete pipeline benchmark-mlop-pipeline -n your-namespace +oc delete task call-orchestrator-api-mlop poll-batch-status-mlop -n your-namespace +oc delete configmap benchmark-config -n your-namespace +oc delete service el-benchmark-mlop-listener -n your-namespace +``` + +## ๐Ÿ“š Additional Resources + +- [Tekton Triggers Documentation](https://tekton.dev/docs/triggers/) +- [EventListener Guide](https://tekton.dev/docs/triggers/eventlisteners/) + +## ๐Ÿค For Project Forks + +If you're using this project as a base for your own: + +1. 
**Deploy** with your parameters: + ```bash + make eventlistener \ + ORCHESTRATOR_API_URL= \ + NAMESPACE= + ``` +2. **Customize** labels and naming if needed (edit YAML files in `tekton/eventlistener/`) +3. **Test** with your orchestrator instance using `test-eventlistener.sh` +4. **Extend** pipeline with your specific requirements + +All configuration is passed as parameters - no manual file editing needed! + +## โ“ Questions or Issues? + +- Check troubleshooting section above +- Review EventListener logs: `oc logs -l eventlistener=benchmark-mlop-listener` +- Review task logs: `oc logs -l tekton.dev/pipelineTask=call-orchestrator-api` +- Validate ConfigMap: `oc get cm benchmark-config -o yaml` +- Test orchestrator connectivity from a pod diff --git a/deploy/tekton/eventlistener/benchmark-config.yaml b/deploy/tekton/eventlistener/benchmark-config.yaml new file mode 100644 index 00000000..02d83bfa --- /dev/null +++ b/deploy/tekton/eventlistener/benchmark-config.yaml @@ -0,0 +1,26 @@ +# MLOps Benchmark Configuration +# +# This ConfigMap is automatically generated by 'make eventlistener'. +# Do not edit manually - regenerate using the Makefile. 
+# +# To regenerate: +# cd deploy +# make eventlistener \ +# ORCHESTRATOR_API_URL= \ +# NAMESPACE= +# +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: benchmark-config + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +data: + # Orchestrator API base URL (cluster-internal service) + # REPLACE THIS with your actual orchestrator URL + orchestrator-api-url: "http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080" + + # API endpoint path for MLOps batches + api-batch-endpoint: "/api/v1/mlops-batches" diff --git a/deploy/tekton/eventlistener/benchmark-pipeline.yaml b/deploy/tekton/eventlistener/benchmark-pipeline.yaml new file mode 100644 index 00000000..e10c7b95 --- /dev/null +++ b/deploy/tekton/eventlistener/benchmark-pipeline.yaml @@ -0,0 +1,183 @@ +--- +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + name: benchmark-mlop-pipeline + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +spec: + description: >- + MLOps benchmark pipeline for triggering batch SAST analysis via EventListener. + This pipeline calls the sast-ai-orchestrator MLOps API endpoint and monitors + batch completion. Designed for performance testing and MLOps workflows. 
+ + params: + - name: batch-sheet-url + type: string + description: "Google Sheet URL with package list" + default: "" + + - name: submitted-by + type: string + description: "Trigger source identifier" + default: "eventlistener-webhook" + + # MLOps-specific parameters + - name: dvc-repo-url + type: string + description: "DVC repository URL for data versioning" + default: "" + + - name: dvc-data-version + type: string + description: "DVC data version tag" + default: "" + + - name: s3-endpoint-url + type: string + description: "S3 endpoint URL for MLOps data" + default: "" + + - name: s3-input-bucket-name + type: string + description: "S3 bucket name for input data" + default: "" + + - name: image-version + type: string + description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" + default: "latest" + + tasks: + - name: call-orchestrator-api + taskRef: + name: call-orchestrator-api-mlop + params: + - name: batch-sheet-url + value: $(params.batch-sheet-url) + - name: submitted-by + value: $(params.submitted-by) + - name: dvc-repo-url + value: $(params.dvc-repo-url) + - name: dvc-data-version + value: $(params.dvc-data-version) + - name: s3-endpoint-url + value: $(params.s3-endpoint-url) + - name: s3-input-bucket-name + value: $(params.s3-input-bucket-name) + - name: image-version + value: $(params.image-version) + + - name: poll-batch-status + taskRef: + name: poll-batch-status-mlop + runAfter: + - call-orchestrator-api + params: + - name: batch-id + value: $(tasks.call-orchestrator-api.results.batch-id) + - name: poll-interval + value: "30" + - name: timeout + value: "480" + + results: + - name: batch-id + description: "Orchestrator batch job ID" + value: $(tasks.call-orchestrator-api.results.batch-id) + + - name: trigger-status + description: "Batch trigger status" + value: $(tasks.call-orchestrator-api.results.status) + + - name: final-status + description: "Final batch completion status" + value: $(tasks.poll-batch-status.results.final-status) + 
+ - name: total-jobs + description: "Total jobs in batch" + value: $(tasks.poll-batch-status.results.total-jobs) + + - name: completed-jobs + description: "Completed jobs count" + value: $(tasks.poll-batch-status.results.completed-jobs) + + - name: failed-jobs + description: "Failed jobs count" + value: $(tasks.poll-batch-status.results.failed-jobs) + + finally: + - name: log-completion + params: + - name: batch-id + value: $(tasks.call-orchestrator-api.results.batch-id) + - name: trigger-status + value: $(tasks.call-orchestrator-api.results.status) + - name: final-status + value: $(tasks.poll-batch-status.results.final-status) + - name: total-jobs + value: $(tasks.poll-batch-status.results.total-jobs) + - name: completed-jobs + value: $(tasks.poll-batch-status.results.completed-jobs) + - name: failed-jobs + value: $(tasks.poll-batch-status.results.failed-jobs) + taskSpec: + params: + - name: batch-id + - name: trigger-status + - name: final-status + - name: total-jobs + - name: completed-jobs + - name: failed-jobs + steps: + - name: log + image: registry.access.redhat.com/ubi9/ubi-minimal:latest + script: | + #!/bin/sh + echo "=========================================" + echo "Pipeline Execution Summary" + echo "=========================================" + echo "Batch ID: $(params.batch-id)" + echo "Trigger Status: $(params.trigger-status)" + echo "Final Status: $(params.final-status)" + echo "" + echo "Jobs Summary:" + echo " Total: $(params.total-jobs)" + echo " Completed: $(params.completed-jobs)" + echo " Failed: $(params.failed-jobs)" + echo "" + + if [ "$(params.final-status)" = "COMPLETED" ]; then + if [ "$(params.failed-jobs)" = "0" ]; then + echo "โœ“ All batch jobs completed successfully!" + echo "" + echo "Next steps:" + echo " 1. Review results in Google Drive" + echo " 2. Check individual job outputs" + echo " 3. 
Analyze metrics in MLflow (if configured)" + else + echo "โš  Batch completed but some jobs failed" + echo "" + echo "Troubleshooting failed jobs:" + echo " 1. Check orchestrator logs: oc logs -l app=sast-ai-orchestrator" + echo " 2. Review failed PipelineRuns: oc get pr -l status=failed" + echo " 3. Check individual job logs" + fi + elif [ "$(params.final-status)" = "TIMEOUT" ]; then + echo "โš  Batch monitoring timed out" + echo "" + echo "The batch may still be processing." + echo "Check status manually:" + echo " curl http://sast-ai-orchestrator/api/v1/job-batches/$(params.batch-id)" + else + echo "โœ— Batch failed or encountered an error" + echo "" + echo "Troubleshooting:" + echo " 1. Check orchestrator logs: oc logs -l app=sast-ai-orchestrator" + echo " 2. Verify ConfigMap: oc get cm benchmark-config -o yaml" + echo " 3. Check batch status: curl http://sast-ai-orchestrator/api/v1/job-batches/$(params.batch-id)" + fi + + echo "=========================================" + diff --git a/deploy/tekton/eventlistener/call-orchestrator-api.yaml b/deploy/tekton/eventlistener/call-orchestrator-api.yaml new file mode 100644 index 00000000..d1c5e50b --- /dev/null +++ b/deploy/tekton/eventlistener/call-orchestrator-api.yaml @@ -0,0 +1,185 @@ +--- +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: call-orchestrator-api-mlop + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +spec: + description: >- + Calls the sast-ai-orchestrator MLOps batch API to trigger batch SAST + analysis jobs with DVC and S3 integration. Used for MLOps performance + testing and benchmark automation. 
+ + params: + - name: batch-sheet-url + type: string + description: "Google Sheet URL containing the list of packages to analyze" + + - name: submitted-by + type: string + description: "Identifier of who/what triggered this batch" + default: "tekton-eventlistener-mlop" + + # MLOps-specific parameters + - name: dvc-repo-url + type: string + description: "DVC repository URL for data versioning" + default: "" + + - name: dvc-data-version + type: string + description: "DVC data version tag" + default: "" + + - name: s3-endpoint-url + type: string + description: "S3 endpoint URL for MLOps data" + default: "" + + - name: s3-input-bucket-name + type: string + description: "S3 input bucket name" + default: "" + + - name: image-version + type: string + description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" + default: "" + + results: + - name: batch-id + description: "Batch job ID returned by the orchestrator" + + - name: status + description: "Status of the API call (success/failed)" + + steps: + - name: call-orchestrator-api + image: quay.io/curl/curl:latest + env: + # Read orchestrator URL from ConfigMap + - name: ORCHESTRATOR_URL + valueFrom: + configMapKeyRef: + name: benchmark-config + key: orchestrator-api-url + + # Read API endpoint from ConfigMap + - name: API_ENDPOINT + valueFrom: + configMapKeyRef: + name: benchmark-config + key: api-batch-endpoint + + script: | + #!/bin/sh + set -e + + echo "=========================================" + echo "Calling Orchestrator MLOps Batch API" + echo "=========================================" + echo "" + + # Display configuration + echo "Configuration:" + echo " Orchestrator URL: $ORCHESTRATOR_URL" + echo " API Endpoint: $API_ENDPOINT" + echo " Batch Sheet URL: $(params.batch-sheet-url)" + echo " Submitted By: $(params.submitted-by)" + echo " DVC Repo: $(params.dvc-repo-url)" + echo " DVC Version: $(params.dvc-data-version)" + echo " S3 Endpoint: $(params.s3-endpoint-url)" + echo " S3 Bucket: 
$(params.s3-input-bucket-name)" + echo " Image Version: $(params.image-version)" + echo "" + + # Construct full API URL by concatenating base URL with endpoint + FULL_API_URL="${ORCHESTRATOR_URL}${API_ENDPOINT}" + echo "Full API URL: $FULL_API_URL" + echo "" + + # Prepare JSON payload for MLOps endpoint + # Note: API expects camelCase field names + PAYLOAD=$(cat <&1) || { + echo "ERROR: curl command failed" + echo "This could mean:" + echo " - Orchestrator service is not reachable" + echo " - Network policy blocking connection" + echo " - Orchestrator URL is incorrect in ConfigMap" + echo "" + echo "Response: $RESPONSE" + echo -n "failed" > $(results.status.path) + echo -n "error" > $(results.batch-id.path) + exit 1 + } + + # Extract HTTP status and body + HTTP_STATUS=$(echo "$RESPONSE" | grep "HTTP_STATUS:" | cut -d':' -f2) + BODY=$(echo "$RESPONSE" | sed '/HTTP_STATUS:/d') + + echo "HTTP Status: $HTTP_STATUS" + echo "Response Body:" + echo "$BODY" | sed 's/^/ /' + echo "" + + # Check if request was successful + if [ "$HTTP_STATUS" -ge 200 ] && [ "$HTTP_STATUS" -lt 300 ]; then + echo "โœ“ API call successful!" 
+ + # Extract batchId from response + # API returns: {"batchId":1, ...} + if echo "$BODY" | grep -q "batchId"; then + # Extract batchId using grep and cut (works without jq) + BATCH_ID=$(echo "$BODY" | grep -o '"batchId":[0-9]*' | cut -d':' -f2 || echo "unknown") + echo "Batch ID: $BATCH_ID" + echo -n "$BATCH_ID" > $(results.batch-id.path) + else + echo "Warning: Could not extract batchId from response" + echo -n "unknown" > $(results.batch-id.path) + fi + + echo -n "success" > $(results.status.path) + + else + echo "โœ— API call failed with HTTP status: $HTTP_STATUS" + echo "" + echo "Possible issues:" + echo " - Orchestrator service returned an error" + echo " - Invalid payload format" + echo " - Batch sheet URL not accessible" + echo " - Missing required parameters" + echo "" + echo -n "failed" > $(results.status.path) + echo -n "error" > $(results.batch-id.path) + exit 1 + fi + + echo "" + echo "=========================================" + echo "Orchestrator API call completed" + echo "=========================================" + diff --git a/deploy/tekton/eventlistener/eventlistener.yaml b/deploy/tekton/eventlistener/eventlistener.yaml new file mode 100644 index 00000000..c799bcd2 --- /dev/null +++ b/deploy/tekton/eventlistener/eventlistener.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: triggers.tekton.dev/v1beta1 +kind: EventListener +metadata: + name: benchmark-mlop-listener + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +spec: + serviceAccountName: pipeline + triggers: + - name: benchmark-mlop-trigger + bindings: + - ref: benchmark-mlop-binding + template: + ref: benchmark-mlop-template + +--- +apiVersion: v1 +kind: Service +metadata: + name: el-benchmark-mlop-listener + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop + eventlistener: benchmark-mlop-listener +spec: + type: ClusterIP + ports: + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + 
selector: + # Only use the eventlistener label that Tekton guarantees + # Kustomization commonLabels would override other labels + eventlistener: benchmark-mlop-listener + diff --git a/deploy/tekton/eventlistener/kustomization.yaml b/deploy/tekton/eventlistener/kustomization.yaml new file mode 100644 index 00000000..a42d73bc --- /dev/null +++ b/deploy/tekton/eventlistener/kustomization.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +# Namespace (can be overridden via kubectl -n or kustomize) +# namespace: sast-ai + +# Common labels applied to all resources +# Note: Don't use app.kubernetes.io/part-of as commonLabel +# because it conflicts with Tekton-managed labels on EventListener pods +commonLabels: + app.kubernetes.io/component: benchmark-mlop + +# Resources to deploy +resources: + - benchmark-config.yaml + - call-orchestrator-api.yaml + - poll-batch-status.yaml + - benchmark-pipeline.yaml + - triggerbinding.yaml + - triggertemplate.yaml + - eventlistener.yaml + +# ConfigMap generator (alternative to static file) +# Uncomment to generate ConfigMap from properties +# configMapGenerator: +# - name: benchmark-config +# literals: +# - orchestrator-api-url=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 +# - default-batch-sheet-url=https://docs.google.com/spreadsheets/d/YOUR_SHEET_ID/edit +# - api-batch-endpoint=/api/v1/job-batches + diff --git a/deploy/tekton/eventlistener/poll-batch-status.yaml b/deploy/tekton/eventlistener/poll-batch-status.yaml new file mode 100644 index 00000000..6a4e5435 --- /dev/null +++ b/deploy/tekton/eventlistener/poll-batch-status.yaml @@ -0,0 +1,210 @@ +--- +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: poll-batch-status-mlop + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +spec: + description: >- + Polls the orchestrator batch status API until the batch job completes + (status COMPLETED or FAILED) or timeout is 
reached. + + params: + - name: batch-id + type: string + description: "Batch ID to monitor (from previous task)" + + - name: poll-interval + type: string + description: "Seconds to wait between status checks" + default: "30" + + - name: timeout + type: string + description: "Maximum time to wait in minutes" + default: "480" + + results: + - name: final-status + description: "Final batch status (COMPLETED, FAILED, or TIMEOUT)" + + - name: total-jobs + description: "Total number of jobs in the batch" + + - name: completed-jobs + description: "Number of successfully completed jobs" + + - name: failed-jobs + description: "Number of failed jobs" + + steps: + - name: poll-status + image: quay.io/curl/curl:latest + env: + - name: ORCHESTRATOR_URL + valueFrom: + configMapKeyRef: + name: benchmark-config + key: orchestrator-api-url + + # Read API endpoint from ConfigMap + - name: API_ENDPOINT + valueFrom: + configMapKeyRef: + name: benchmark-config + key: api-batch-endpoint + + - name: BATCH_ID + value: $(params.batch-id) + + - name: POLL_INTERVAL + value: $(params.poll-interval) + + - name: TIMEOUT_MINUTES + value: $(params.timeout) + + script: | + #!/bin/sh + set -e + + echo "=========================================" + echo "Polling Batch Status" + echo "=========================================" + echo "" + echo "Configuration:" + echo " Orchestrator URL: $ORCHESTRATOR_URL" + echo " API Endpoint: $API_ENDPOINT" + echo " Batch ID: $BATCH_ID" + echo " Poll Interval: ${POLL_INTERVAL}s" + echo " Timeout: ${TIMEOUT_MINUTES} minutes" + echo "" + + # Calculate timeout in seconds + TIMEOUT_SECONDS=$((TIMEOUT_MINUTES * 60)) + START_TIME=$(date +%s) + + # Construct status API URL using endpoint from ConfigMap + STATUS_URL="${ORCHESTRATOR_URL}${API_ENDPOINT}/${BATCH_ID}" + echo "Status API URL: $STATUS_URL" + echo "" + + # Initialize counters + POLL_COUNT=0 + + # Poll loop + while true; do + POLL_COUNT=$((POLL_COUNT + 1)) + CURRENT_TIME=$(date +%s) + ELAPSED=$((CURRENT_TIME - 
START_TIME)) + + echo "----------------------------------------" + echo "Poll #${POLL_COUNT} (elapsed: ${ELAPSED}s)" + echo "----------------------------------------" + + # Check timeout + if [ $ELAPSED -ge $TIMEOUT_SECONDS ]; then + echo "โœ— TIMEOUT reached after ${TIMEOUT_MINUTES} minutes" + echo "" + echo "Batch did not complete within the timeout period." + echo "You can check the status manually:" + echo " curl $STATUS_URL" + echo "" + echo -n "TIMEOUT" > $(results.final-status.path) + echo -n "0" > $(results.total-jobs.path) + echo -n "0" > $(results.completed-jobs.path) + echo -n "0" > $(results.failed-jobs.path) + exit 1 + fi + + # Call status API + RESPONSE=$(curl -s -w "\nHTTP_STATUS:%{http_code}" "$STATUS_URL" 2>&1) || { + echo "ERROR: Failed to call status API" + sleep $POLL_INTERVAL + continue + } + + # Parse response + HTTP_STATUS=$(echo "$RESPONSE" | grep "HTTP_STATUS:" | cut -d':' -f2) + BODY=$(echo "$RESPONSE" | sed '/HTTP_STATUS:/d') + + if [ "$HTTP_STATUS" != "200" ]; then + echo "ERROR: API returned status $HTTP_STATUS" + echo "Response: $BODY" + sleep $POLL_INTERVAL + continue + fi + + # Extract status fields (without jq) + STATUS=$(echo "$BODY" | grep -o '"status":"[^"]*"' | cut -d'"' -f4) + TOTAL=$(echo "$BODY" | grep -o '"totalJobs":[0-9]*' | cut -d':' -f2) + COMPLETED=$(echo "$BODY" | grep -o '"completedJobs":[0-9]*' | cut -d':' -f2) + FAILED=$(echo "$BODY" | grep -o '"failedJobs":[0-9]*' | cut -d':' -f2) + + echo "Status: $STATUS" + echo "Progress: $COMPLETED/$TOTAL jobs completed ($FAILED failed)" + + # Check if batch is done + case "$STATUS" in + COMPLETED) + echo "" + echo "=========================================" + echo "โœ“ Batch COMPLETED Successfully!" 
+ echo "=========================================" + echo "Total Jobs: $TOTAL" + echo "Completed: $COMPLETED" + echo "Failed: $FAILED" + echo "" + + if [ "$FAILED" -gt 0 ]; then + echo "โš  Warning: Some jobs failed ($FAILED out of $TOTAL)" + else + echo "โœ“ All jobs completed successfully!" + fi + + echo "" + echo "Total execution time: ${ELAPSED}s ($((ELAPSED / 60)) minutes)" + echo "=========================================" + + echo -n "COMPLETED" > $(results.final-status.path) + echo -n "$TOTAL" > $(results.total-jobs.path) + echo -n "$COMPLETED" > $(results.completed-jobs.path) + echo -n "$FAILED" > $(results.failed-jobs.path) + exit 0 + ;; + + FAILED) + echo "" + echo "=========================================" + echo "โœ— Batch FAILED" + echo "=========================================" + echo "Total Jobs: $TOTAL" + echo "Completed: $COMPLETED" + echo "Failed: $FAILED" + echo "" + echo "Check orchestrator logs for details:" + echo " oc logs -l app=sast-ai-orchestrator" + echo "=========================================" + + echo -n "FAILED" > $(results.final-status.path) + echo -n "$TOTAL" > $(results.total-jobs.path) + echo -n "$COMPLETED" > $(results.completed-jobs.path) + echo -n "$FAILED" > $(results.failed-jobs.path) + exit 1 + ;; + + PROCESSING|PENDING) + echo "Batch still processing... 
waiting ${POLL_INTERVAL}s" + echo "" + sleep $POLL_INTERVAL + ;; + + *) + echo "Unknown status: $STATUS" + echo "Full response: $BODY" + sleep $POLL_INTERVAL + ;; + esac + done + diff --git a/deploy/tekton/eventlistener/triggerbinding.yaml b/deploy/tekton/eventlistener/triggerbinding.yaml new file mode 100644 index 00000000..226a39e4 --- /dev/null +++ b/deploy/tekton/eventlistener/triggerbinding.yaml @@ -0,0 +1,45 @@ +--- +apiVersion: triggers.tekton.dev/v1beta1 +kind: TriggerBinding +metadata: + name: benchmark-mlop-binding + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +spec: + params: + # Extract batch sheet URL from webhook payload + # For ArgoCD triggers, this will be empty (sheet created by pipeline) + # For direct webhook triggers, this can be provided + - name: batch-sheet-url + value: $(body.batch_sheet_url) + + # Extract submitter information + # ArgoCD: "argocd-prod-sync" + # Direct webhook: custom value + - name: submitted-by + value: $(body.submitted_by) + + # Extract source to determine trigger type + # "argocd" = triggered by ArgoCD notification + # empty/other = direct webhook trigger + - name: trigger-source + value: $(body.source) + + # MLOps-specific parameters (optional, for mlops-batches endpoint) + - name: dvc-repo-url + value: $(body.dvc_repo_url) + + - name: dvc-data-version + value: $(body.dvc_data_version) + + - name: s3-endpoint-url + value: $(body.s3_endpoint_url) + + - name: s3-input-bucket-name + value: $(body.s3_input_bucket_name) + + # Optional: Override workflow version for testing + - name: image-version + value: $(body.image_version) + diff --git a/deploy/tekton/eventlistener/triggertemplate.yaml b/deploy/tekton/eventlistener/triggertemplate.yaml new file mode 100644 index 00000000..5cd06112 --- /dev/null +++ b/deploy/tekton/eventlistener/triggertemplate.yaml @@ -0,0 +1,90 @@ +--- +apiVersion: triggers.tekton.dev/v1beta1 +kind: TriggerTemplate +metadata: + name: 
benchmark-mlop-template + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +spec: + params: + # Parameters from TriggerBinding + - name: batch-sheet-url + description: "Google Sheet URL with package list" + default: "" + + - name: submitted-by + description: "Trigger source" + default: "eventlistener-webhook" + + - name: trigger-source + description: "Source of the trigger (argocd, webhook, etc.)" + default: "webhook" + + # MLOps-specific parameters + - name: dvc-repo-url + description: "DVC repository URL for data versioning" + default: "" + + - name: dvc-data-version + description: "DVC data version tag" + default: "" + + - name: s3-endpoint-url + description: "S3 endpoint URL for MLOps data" + default: "" + + - name: s3-input-bucket-name + description: "S3 bucket name for input data" + default: "" + + - name: image-version + description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" + default: "" + + resourcetemplates: + - apiVersion: tekton.dev/v1 + kind: PipelineRun + metadata: + # Use generateName for unique PipelineRun names + # Kubernetes will append random suffix: benchmark-mlop-pipeline-abc123 + generateName: benchmark-mlop-pipeline- + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop + sast-ai.redhat.com/trigger-type: webhook + tekton.dev/pipeline: benchmark-mlop-pipeline + spec: + pipelineRef: + name: benchmark-mlop-pipeline + + params: + # Pass parameters to pipeline + - name: batch-sheet-url + value: $(tt.params.batch-sheet-url) + + - name: submitted-by + value: $(tt.params.submitted-by) + + - name: trigger-source + value: $(tt.params.trigger-source) + + - name: dvc-repo-url + value: $(tt.params.dvc-repo-url) + + - name: dvc-data-version + value: $(tt.params.dvc-data-version) + + - name: s3-endpoint-url + value: $(tt.params.s3-endpoint-url) + + - name: s3-input-bucket-name + value: $(tt.params.s3-input-bucket-name) + + - name: image-version 
+          value: $(tt.params.image-version)
+
+  # Timeout for the entire pipeline
+  timeouts:
+    pipeline: "4h"
+

From e787c371a0f9fb92bf5340a06bac083cd1ae3f88 Mon Sep 17 00:00:00 2001
From: Yael
Date: Sun, 2 Nov 2025 14:36:11 +0200
Subject: [PATCH 2/4] feat: adjust EventListener to integrate with new MLOps
 pipeline

Update EventListener configuration and parameters to align with the
refactored MLOps pipeline that uses DVC version parameters
---
 deploy/Makefile                               | 277 ++++++-----
 deploy/tekton/eventlistener/README.md         |  77 ++--
 .../eventlistener/benchmark-config.yaml       |  23 +-
 .../eventlistener/benchmark-pipeline.yaml     |  52 +--
 .../eventlistener/call-orchestrator-api.yaml  |  54 +--
 .../eventlistener/test-eventlistener.sh       | 377 ++++++++++++++++++
 .../tekton/eventlistener/triggerbinding.yaml  |  37 +-
 .../tekton/eventlistener/triggertemplate.yaml |  70 ++--
 .../tekton/overlays/mlops/kustomization.yaml  |   3 +
 9 files changed, 706 insertions(+), 264 deletions(-)
 create mode 100755 deploy/tekton/eventlistener/test-eventlistener.sh

diff --git a/deploy/Makefile b/deploy/Makefile
index ae562b45..dd882f26 100644
--- a/deploy/Makefile
+++ b/deploy/Makefile
@@ -10,7 +10,8 @@ NAMESPACE ?= $(shell oc config view --minify --output 'jsonpath={..namespace}')
 CO := oc --context $(CONTEXT)
 
 # Pipeline parameters (overrideable on the CLI):
-REPO_REMOTE_URL ?= source/code/url
 HUMAN_VERIFIED_FILE_PATH ?= ""
 
 LLM_URL ?= http://<>
@@ -22,7 +21,8 @@ PROJECT_NAME ?= project-name
 PROJECT_VERSION ?= project-version
 DOWNLOAD_REPO ?= false
 
-REPO_REMOTE_URL ?= ""
+REPO_REMOTE_URL ?= source/code/url
 REPO_LOCAL_PATH ?= /path/to/repo
 
 INPUT_REPORT_FILE_PATH ?= http://<>
@@ -56,6 +56,10 @@ S3_INPUT_BUCKET_NAME ?= test
 GITHUB_REPO_URL ?= https://github.com/RHEcosystemAppEng/sast-ai-workflow.git
 ARGOCD_NAMESPACE ?= sast-ai
 
 # EventListener Configuration
 ORCHESTRATOR_API_URL ?=
+MLOPS_ORCHESTRATOR_API_URL ?=
 
@@ -67,21 +71,51 @@
GOOGLE_SERVICE_ACCOUNT_JSON_PATH ?= ./service_account.json
 GCS_SERVICE_ACCOUNT_JSON_PATH ?= ./gcs_service_account.json
 DOCKER_CONFIG_PATH ?= $(HOME)/.config/containers/auth.json
+
 # S3/Minio Configuration
-S3_OUTPUT_BUCKET_NAME ?= ""
+S3_OUTPUT_BUCKET_NAME ?= bucket-name
 AWS_ACCESS_KEY_ID ?= ""
 AWS_SECRET_ACCESS_KEY ?= ""
 S3_ENDPOINT_URL ?= ""
 
-.PHONY: deploy setup tasks secrets pipeline scripts configmaps run clean generate-prompts prompts argocd-deploy-mlops argocd-deploy-prod argocd-clean
+.PHONY: deploy deploy-dev deploy-prod deploy-mlops setup tasks-dev tasks-prod tasks-mlops secrets pipeline scripts configmaps run clean generate-prompts prompts argocd-deploy-dev argocd-deploy-prod argocd-clean eventlistener eventlistener-clean
 
 # Unified deploy command
 # Usage:
 #   make deploy                              # Deploy base (Google Drive, :latest)
 #   make deploy ENV=mlops                    # Deploy MLOps (S3/Minio, :latest)
 #   make deploy ENV=prod IMAGE_VERSION=1.2.3 # Deploy prod (Google Drive, versioned)
-deploy:
-	@if [ "$(ENV)" = "prod" ] && [ -z "$(IMAGE_VERSION)" ]; then \
+deploy: deploy-$(ENV)
+
+deploy-dev: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):latest
+deploy-dev: setup tasks-dev
+	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
+	@echo "๐Ÿš€ SAST AI Workflow - Development Deployment"
+	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
+	@echo " Environment: Development"
+	@echo " Container 
Image: $(CONTAINER_IMAGE)"
+	@echo ""
+	@echo "โœ… Development deployment completed successfully!"
+
+deploy-prod: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):$(IMAGE_VERSION)
+deploy-prod: setup tasks-prod argocd-deploy-prod
+	@if [ -z "$(IMAGE_VERSION)" ]; then \
 	echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \
 	echo "โŒ ERROR: IMAGE_VERSION is required for production deployment"; \
 	echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \
@@ -90,55 +124,100 @@ deploy:
 	echo ""; \
 	echo "Available versions can be found at:"; \
 	echo "https://quay.io/repository/ecosystem-appeng/sast-ai-workflow?tab=tags"; \
+	exit 1; \
+	fi
+
+deploy-mlops: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):latest
+deploy-mlops: setup tasks-mlops argocd-deploy-mlops
+	@if [ -z "$(MLOPS_ORCHESTRATOR_API_URL)" ]; then \
+	echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \
+	echo "โŒ ERROR: MLOPS_ORCHESTRATOR_API_URL is required for MLOps deployment"; \
+	echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \
 	echo ""; \
+	echo "Usage: make deploy-mlops MLOPS_ORCHESTRATOR_API_URL="; \
+	echo ""; \
+	echo "Example:"; \
+	echo " make deploy-mlops MLOPS_ORCHESTRATOR_API_URL=http://orchestrator.sast-ai.svc.cluster.local:8080"; \
 	exit 1; \
 	fi
 	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
-	@echo "๐Ÿš€ SAST AI Workflow - Deployment"
+	@echo "๐Ÿค– SAST AI Workflow - MLOps Benchmarking Deployment"
 	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
-	@if [ "$(ENV)" = "mlops" ]; then \
-	echo " Environment: MLOps"; \
-	echo " Storage: S3/Minio output upload"; \
-	echo " Container Image: $(IMAGE_REGISTRY)/$(IMAGE_NAME):latest"; \
-	elif [ "$(ENV)" = "prod" ]; then \
-	echo " Environment: Production"; \
-	echo " Storage: Google Drive upload"; \
-	echo " Container Image: $(IMAGE_REGISTRY)/$(IMAGE_NAME):$(IMAGE_VERSION)"; \
-	else \
-	echo " Environment: Base"; \
-	echo " Storage: Google Drive upload"; \
-	echo " Container Image: $(IMAGE_REGISTRY)/$(IMAGE_NAME):latest"; \
-	fi
+	@echo " Environment: MLOps (Benchmarking)"
+	@echo " Container Image: $(CONTAINER_IMAGE)"
+	@echo " Orchestrator URL: $(MLOPS_ORCHESTRATOR_API_URL)"
 	@echo ""
-	@if [ "$(ENV)" = "mlops" ]; then \
-	$(MAKE) --no-print-directory ENV=mlops setup scripts tasks prompts configmaps argocd-deploy-mlops; \
-	elif [ "$(ENV)" = "prod" ]; then \
-	$(MAKE) --no-print-directory ENV=prod CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):$(IMAGE_VERSION) setup scripts tasks prompts configmaps argocd-deploy-prod; \
-	else \
-	$(MAKE) --no-print-directory setup scripts tasks prompts configmaps; \
-	fi
+	@echo "๐ŸŽฏ Deploying EventListener..."
+	@sed -e 's|ORCHESTRATOR_API_URL_PLACEHOLDER|$(MLOPS_ORCHESTRATOR_API_URL)|g' \
+		tekton/eventlistener/benchmark-config.yaml.example > tekton/eventlistener/benchmark-config.yaml
+	@$(CO) apply -k tekton/eventlistener/ -n $(NAMESPACE) || \
+		{ echo " โŒ Failed to deploy EventListener resources"; exit 1; }
+	@echo " โœ“ EventListener deployed"
+	@echo ""
+	@echo "โœ… MLOps deployment completed successfully!"
-setup:
-	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
-	@echo "๐Ÿš€ SAST AI Workflow - Infrastructure Setup"
+setup: secrets scripts prompts configmaps
 	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
+	@echo "๐Ÿš€ Common Infrastructure Ready"
 	@echo " Context: $(CONTEXT)"
 	@echo " Namespace: $(NAMESPACE)"
-	@echo ""
-	@$(MAKE) --no-print-directory secrets
 
-tasks:
-	@echo "๐Ÿ“‹ Setting up Tekton Resources..."
-	@if [ "$(ENV)" = "prod" ]; then \
-	$(CO) apply -k tekton/overlays/prod -n $(NAMESPACE) && \
-	echo " โœ“ Tekton resources deployed (production overlay)"; \
-	elif [ "$(ENV)" = "mlops" ]; then \
-	$(CO) apply -k tekton/overlays/mlops -n $(NAMESPACE) && \
-	echo " โœ“ Tekton resources deployed (mlops overlay - S3 output storage)"; \
-	else \
-	$(CO) apply -k tekton/base -n $(NAMESPACE) && \
-	echo " โœ“ Tekton resources deployed (base - Google Drive storage)"; \
-	fi
+tasks-dev:
+	@echo "๐Ÿ“‹ Deploying Tekton resources (dev)..."
+	@$(CO) apply -k tekton/base -n $(NAMESPACE)
+	@echo " โœ“ Base Tekton resources (base - Google Drive storage)"
+
+tasks-prod:
+	@echo "๐Ÿ“‹ Deploying Tekton resources (prod)..."
+	@$(CO) apply -k tekton/overlays/prod -n $(NAMESPACE)
+	@echo " โœ“ Production Tekton resources (versioned)"
+
+tasks-mlops:
+	@echo "๐Ÿ“‹ Deploying Tekton resources (mlops)..."
+	@$(CO) apply -k tekton/overlays/mlops -n $(NAMESPACE)
+	@echo " โœ“ MLOps Tekton resources (MinIO/S3)"
 
 secrets:
 	@echo "๐Ÿ” Configuring Secrets..."
@@ -251,10 +330,6 @@ secrets:
 		{ echo " โŒ Failed to patch pipeline service account"; exit 1; }
 	@echo " โœ“ Service account configured"
 
-pipeline:
-	@echo "๐Ÿ”ง Pipeline..."
-	@echo " โœ“ Pipeline deployed with Tekton resources (via kustomize)"
-
 scripts:
 	@echo "๐Ÿ“œ Setting up Scripts..."
 	@$(CO) apply -n $(NAMESPACE) -f tekton/scripts/upload_to_drive_cm.yaml || \
@@ -298,6 +373,7 @@ run:
 	@echo " ๐Ÿ”„ Removing old pipeline runs..."
 	@$(CO) delete pipelinerun sast-ai-workflow-pipelinerun \
 		-n $(NAMESPACE) --ignore-not-found
 	# Create PipelineRun with current parameters
 	@sed \
 		-e 's|PROJECT_NAME_PLACEHOLDER|$(PROJECT_NAME)|g' \
@@ -367,68 +443,81 @@ argocd-clean:
 
 eventlistener:
 	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
-	@echo "๐ŸŽฏ SAST AI Workflow - EventListener for MLOps Benchmarking"
+	@echo "๐ŸŽฏ EventListener Standalone Update"
 	@echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"
-	@echo " Context: $(CONTEXT)"
-	@echo " Namespace: $(NAMESPACE)"
+	@echo " โš ๏ธ Use 'make deploy-mlops' for full deployment"
 	@echo ""
-	@echo "๐Ÿ“‹ Validating EventListener configuration..."
 	@if [ -z "$(ORCHESTRATOR_API_URL)" ]; then \
-		echo " โŒ ORCHESTRATOR_API_URL is required"; \
+		echo "โŒ ERROR: ORCHESTRATOR_API_URL is required"; \
 		echo ""; \
 		echo "Usage:"; \
-		echo " make eventlistener ORCHESTRATOR_API_URL= NAMESPACE="; \
+		echo " make eventlistener ORCHESTRATOR_API_URL="; \
 		echo ""; \
 		echo "Example:"; \
-		echo " make eventlistener \\"; \
-		echo " ORCHESTRATOR_API_URL=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 \\"; \
-		echo " NAMESPACE=sast-ai"; \
-		echo ""; \
-		echo "Tip: Find your orchestrator URL with:"; \
-		echo " oc get svc -l app=sast-ai-orchestrator"; \
-		echo ""; \
+		echo " make eventlistener ORCHESTRATOR_API_URL=http://orchestrator.sast-ai.svc.cluster.local:8080"; \
 		exit 1; \
 	fi
-	@echo " โœ“ Configuration validated"
-	@echo " โ€ข Orchestrator URL: $(ORCHESTRATOR_API_URL)"
+	@echo "๐ŸŽฏ Deploying EventListener..."
+	@sed -e 's|ORCHESTRATOR_API_URL_PLACEHOLDER|$(ORCHESTRATOR_API_URL)|g' \
+		tekton/eventlistener/benchmark-config.yaml.example > tekton/eventlistener/benchmark-config.yaml
-	@echo "๐Ÿ”ง Generating benchmark-config.yaml..."
-	@cat > tekton/eventlistener/benchmark-config.yaml <<EOF
 	@$(CO) apply -k tekton/eventlistener/ -n $(NAMESPACE) || \
 		{ echo " โŒ Failed to deploy EventListener resources"; exit 1; }
-	@echo " โœ“ EventListener deployed"
-	@echo ""
-	@echo "โœ… EventListener deployment completed!"
+	@echo ""
+	@echo "โœ… EventListener updated"
 	@echo ""
-	@echo "๐Ÿ“Š Verify deployment:"
-	@echo " oc get eventlistener,task,pipeline,cm -l app.kubernetes.io/component=benchmark-mlop -n $(NAMESPACE)"
+	@echo "๐Ÿ“Š Verify: oc get eventlistener,task,pipeline -l app.kubernetes.io/component=benchmark-mlop -n $(NAMESPACE)"
-	@echo "๐Ÿงช Test EventListener:"
-	@echo " cd tekton/eventlistener && ./test-eventlistener.sh"
+	@echo "๐Ÿงช Test: cd tekton/eventlistener && ./test-eventlistener.sh"
 	@echo ""
 
 eventlistener-clean:
 	@echo "๐Ÿงน Removing EventListener resources..."
+	@echo " ๐Ÿƒ Cleaning benchmark PipelineRuns..."
+	@$(CO) delete pipelinerun -l app.kubernetes.io/component=benchmark-mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true
+	@echo " โœ“ Benchmark PipelineRuns removed"
+	@echo " ๐Ÿ“‹ Cleaning benchmark TaskRuns..."
+	@$(CO) delete taskrun -l app.kubernetes.io/component=benchmark-mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true
+	@echo " โœ“ Benchmark TaskRuns removed"
+	@echo " ๐Ÿ—‘๏ธ Removing EventListener infrastructure..."
 	@$(CO) delete -k tekton/eventlistener/ -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true
 	@echo " โœ“ EventListener resources removed"
 
@@ -457,6 +546,9 @@ clean:
 	elif [ "$(ENV)" = "mlop" ]; then \
 		$(CO) delete -k tekton/overlays/mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \
 		echo " โœ“ MLOp Tekton resources removed (kustomize overlay)"; \
 	else \
 		$(CO) delete -k tekton/base -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \
 		echo " โœ“ Base Tekton resources removed (kustomize base)"; \
@@ -516,7 +608,8 @@ clean:
 	@echo "๐Ÿ” Removing Secrets..."
 	@$(CO) delete secret sast-ai-gitlab-token \
 		sast-ai-default-llm-creds \
-		sast-ai-google-drive-service-account \
+		sast-ai-google-service-account \
 		sast-ai-gcs-service-account \
 		sast-ai-s3-output-credentials \
 		sast-ai-quay-registry-config \
diff --git a/deploy/tekton/eventlistener/README.md b/deploy/tekton/eventlistener/README.md
index 6b571c05..99f5251e 100644
--- a/deploy/tekton/eventlistener/README.md
+++ b/deploy/tekton/eventlistener/README.md
@@ -1,6 +1,6 @@
 # Tekton EventListener for MLOps Benchmarking
 
-This directory contains a Tekton EventListener implementation that triggers the sast-ai-orchestrator MLOps batch API via webhook. This enables automated MLOps performance testing and benchmarking with DVC data versioning and S3 integration.
+This directory contains a Tekton EventListener implementation that triggers the sast-ai-orchestrator MLOps batch API via webhook. This enables automated MLOps performance testing and benchmarking with DVC data versioning.
## ๐ŸŽฏ Purpose @@ -8,7 +8,6 @@ Enable MLOps benchmark testing for batch SAST analysis jobs: - โœ… Webhook-based triggering (curl/HTTP POST) - โœ… Integration with sast-ai-orchestrator MLOps API (`/api/v1/mlops-batches`) - โœ… DVC data versioning support -- โœ… S3 object storage integration - โœ… Container image version testing - โœ… Separation from production workflows - โœ… Fork-friendly configuration @@ -90,8 +89,6 @@ make eventlistener \ - โœ… Deploys all EventListener resources via Kustomize - โœ… Shows verification and testing commands -**Note:** The Google Sheet URL is provided via the webhook payload when triggering the EventListener, not during deployment. - **Note:** The EventListener always calls `/api/v1/mlops-batches` endpoint (hardcoded for MLOps benchmarking). Verify deployment: @@ -118,12 +115,11 @@ http://el-benchmark-mlop-listener..svc.cluster.local:8080 curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ -d '{ - "batch_sheet_url": "https://docs.google.com/spreadsheets/d/YOUR_TEST_SHEET/edit", "submitted_by": "manual-test", - "dvc_repo_url": "https://gitlab.com/your-org/dvc-repo.git", - "dvc_data_version": "v1.0.0", - "s3_endpoint_url": "https://s3.amazonaws.com", - "s3_input_bucket_name": "mlops-test-data" + "image_version": "v2.1.0", + "dvc_nvr_version": "v1.0.0", + "dvc_known_false_positives_version": "v1.0.0", + "dvc_prompts_version": "v1.0.0" }' ``` @@ -132,13 +128,11 @@ curl -X POST http://localhost:8080 \ curl -X POST http://localhost:8080 \ -H 'Content-Type: application/json' \ -d '{ - "batch_sheet_url": "https://docs.google.com/spreadsheets/d/YOUR_TEST_SHEET/edit", "submitted_by": "version-test", - "dvc_repo_url": "https://gitlab.com/your-org/dvc-repo.git", - "dvc_data_version": "v1.0.0", - "s3_endpoint_url": "https://s3.amazonaws.com", - "s3_input_bucket_name": "mlops-test-data", - "image_version": "v2.1.0" + "image_version": "v2.1.0", + "dvc_nvr_version": "v1.0.0", + "dvc_known_false_positives_version": "v1.0.0", 
+ "dvc_prompts_version": "v1.0.0" }' ``` @@ -182,9 +176,10 @@ Calling Orchestrator MLOps Batch API Configuration: Orchestrator URL: http://sast-ai-orchestrator... API Endpoint: /api/v1/mlops-batches (MLOps benchmarking) - Batch Sheet URL: https://docs.google.com/... - DVC Repo: https://gitlab.com/... - S3 Bucket: mlops-test-data + Image Version: v2.1.0 + DVC NVR Version: v1.0.0 + DVC Prompts Version: v1.0.0 + DVC Known False Positives Version: v1.0.0 ... โœ“ API call successful! Batch ID: batch-12345 @@ -220,7 +215,7 @@ oc logs -l tekton.dev/pipelineTask=call-orchestrator-api --tail=100 - Orchestrator URL incorrect in ConfigMap - Orchestrator service not running: `oc get pods -l app=sast-ai-orchestrator` - Network policy blocking connections -- Google Sheet URL not accessible by orchestrator +- DVC version parameters not provided in webhook payload #### Verify ConfigMap @@ -243,26 +238,22 @@ Send JSON payload with these fields: ```json { - "batch_sheet_url": "https://docs.google.com/spreadsheets/d/SHEET_ID/edit", "submitted_by": "trigger-source", - "dvc_repo_url": "https://gitlab.com/org/dvc-repo.git", - "dvc_data_version": "v1.2.3", - "s3_endpoint_url": "https://s3.amazonaws.com", - "s3_input_bucket_name": "mlops-data", + "dvc_nvr_version": "v1.2.3", + "dvc_known_false_positives_version": "v1.2.3", + "dvc_prompts_version": "v1.2.3", "image_version": "v2.0.0" } ``` **Required Fields:** -- `batch_sheet_url` - Google Sheet with package list -- `dvc_repo_url` - DVC repository URL -- `dvc_data_version` - DVC data version tag -- `s3_endpoint_url` - S3 endpoint URL -- `s3_input_bucket_name` - S3 bucket name +- `dvc_nvr_version` - DVC NVR resource version +- `dvc_prompts_version` - DVC prompts resource version +- `dvc_known_false_positives_version` - DVC known false positives resource version **Optional Fields:** - `submitted_by` - Defaults to "eventlistener-webhook" -- `image_version` - Override workflow version for testing (e.g., "v2.1.0", "sha-abc123") +- 
`image_version` - Defaults to "latest" (e.g., "v2.1.0", "sha-abc123") ### ConfigMap Keys @@ -281,13 +272,11 @@ The `benchmark-config` ConfigMap is automatically generated by `make eventlisten | Parameter | Type | Required | Default | Description | |-----------|------|----------|---------|-------------| -| `batch-sheet-url` | string | Yes | - | Google Sheet with package list | +| `dvc-nvr-version` | string | **Yes** | - | DVC NVR resource version | +| `dvc-prompts-version` | string | **Yes** | - | DVC prompts resource version | +| `dvc-known-false-positives-version` | string | **Yes** | - | DVC known false positives resource version | | `submitted-by` | string | No | `eventlistener-webhook` | Trigger source identifier | -| `dvc-repo-url` | string | Yes | - | DVC repository URL for data versioning | -| `dvc-data-version` | string | Yes | - | DVC data version tag | -| `s3-endpoint-url` | string | Yes | - | S3 endpoint URL | -| `s3-input-bucket-name` | string | Yes | - | S3 bucket name for input data | -| `image-version` | string | No | (default from pipeline) | Workflow image version for testing (tag only, e.g., "v2.1.0") | +| `image-version` | string | No | `latest` | Workflow image version for testing (tag only, e.g., "v2.1.0") | ## ๐ŸŽ“ Understanding the Architecture @@ -339,6 +328,7 @@ The `benchmark-config` ConfigMap is automatically generated by `make eventlisten โ”‚ Orchestrator API โ”‚ โ”‚ POST /api/v1/ โ”‚ โ”‚ mlops-batches โ”‚ + โ”‚ (with DVC versions) โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ ``` @@ -351,10 +341,21 @@ The `benchmark-config` ConfigMap is automatically generated by `make eventlisten 2. **TriggerBinding**: Extracts parameters from webhook JSON payload (including MLOps params) 3. **TriggerTemplate**: Generates PipelineRun with extracted parameters 4. **Pipeline**: Orchestrates task execution, monitors completion, handles results -5. 
**Task 1 (call-orchestrator-api)**: Calls orchestrator MLOps API with DVC/S3 params +5. **Task 1 (call-orchestrator-api)**: Calls orchestrator MLOps API with DVC version params 6. **Task 2 (poll-batch-status)**: Monitors batch completion until done or timeout 7. **ConfigMap**: Stores environment-specific configuration (orchestrator URL, API endpoint) +## ๐Ÿ”„ Production Enhancements + +For production use, consider: + +### Automation + +1. **Create CronJob** for scheduled benchmarking +2. **Set up monitoring** (Prometheus metrics) +3. **Configure notifications** (Slack/email on completion/failure) +4. **Add retry logic** for transient failures + ### Production Deployment Deploy to dedicated namespace: @@ -381,8 +382,6 @@ This creates both: - The `mlop-sast-ai-workflow-pipeline` that the orchestrator will trigger - The EventListener webhook endpoint for triggering benchmarks -**Note:** The Google Sheet URL is provided when triggering the EventListener via webhook, not during deployment. - ## ๐Ÿงน Cleanup To remove all MLOps benchmark resources: diff --git a/deploy/tekton/eventlistener/benchmark-config.yaml b/deploy/tekton/eventlistener/benchmark-config.yaml index 02d83bfa..71f11281 100644 --- a/deploy/tekton/eventlistener/benchmark-config.yaml +++ b/deploy/tekton/eventlistener/benchmark-config.yaml @@ -1,13 +1,19 @@ -# MLOps Benchmark Configuration +# MLOps Benchmark Configuration Example # -# This ConfigMap is automatically generated by 'make eventlistener'. -# Do not edit manually - regenerate using the Makefile. +# This is an EXAMPLE file for reference only. +# The actual benchmark-config.yaml is automatically generated by the Makefile. 
# -# To regenerate: +# Recommended deployment method: # cd deploy # make eventlistener \ -# ORCHESTRATOR_API_URL= \ -# NAMESPACE= +# ORCHESTRATOR_API_URL=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 \ +# NAMESPACE=your-namespace +# +# Finding your orchestrator URL: +# oc get svc -l app=sast-ai-orchestrator +# +# Note: The Google Sheet URL is provided via webhook payload when triggering, +# not in this ConfigMap. # --- apiVersion: v1 @@ -19,8 +25,9 @@ metadata: app.kubernetes.io/component: benchmark-mlop data: # Orchestrator API base URL (cluster-internal service) - # REPLACE THIS with your actual orchestrator URL - orchestrator-api-url: "http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080" + # This will be replaced by the Makefile with your actual orchestrator URL + orchestrator-api-url: "http://sast-ai-orchestrator" # API endpoint path for MLOps batches api-batch-endpoint: "/api/v1/mlops-batches" + diff --git a/deploy/tekton/eventlistener/benchmark-pipeline.yaml b/deploy/tekton/eventlistener/benchmark-pipeline.yaml index e10c7b95..d0787663 100644 --- a/deploy/tekton/eventlistener/benchmark-pipeline.yaml +++ b/deploy/tekton/eventlistener/benchmark-pipeline.yaml @@ -13,61 +13,51 @@ spec: batch completion. Designed for performance testing and MLOps workflows. 
params: - - name: batch-sheet-url - type: string - description: "Google Sheet URL with package list" - default: "" - - name: submitted-by type: string description: "Trigger source identifier" default: "eventlistener-webhook" - # MLOps-specific parameters - - name: dvc-repo-url + - name: image-version type: string - description: "DVC repository URL for data versioning" - default: "" + description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" + default: "latest" - - name: dvc-data-version + # DVC version parameters (required) + - name: dvc-nvr-version type: string - description: "DVC data version tag" - default: "" + description: "DVC NVR resource version" - - name: s3-endpoint-url + - name: dvc-prompts-version type: string - description: "S3 endpoint URL for MLOps data" - default: "" + description: "DVC prompts resource version" - - name: s3-input-bucket-name + - name: dvc-known-false-positives-version type: string - description: "S3 bucket name for input data" - default: "" + description: "DVC known false positives resource version" - - name: image-version + - name: use-known-false-positive-file type: string - description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" - default: "latest" + description: "Whether to use known false positive file" + default: "true" tasks: - name: call-orchestrator-api taskRef: name: call-orchestrator-api-mlop params: - - name: batch-sheet-url - value: $(params.batch-sheet-url) - name: submitted-by value: $(params.submitted-by) - - name: dvc-repo-url - value: $(params.dvc-repo-url) - - name: dvc-data-version - value: $(params.dvc-data-version) - - name: s3-endpoint-url - value: $(params.s3-endpoint-url) - - name: s3-input-bucket-name - value: $(params.s3-input-bucket-name) - name: image-version value: $(params.image-version) + - name: dvc-nvr-version + value: $(params.dvc-nvr-version) + - name: dvc-prompts-version + value: $(params.dvc-prompts-version) + - name: dvc-known-false-positives-version + 
value: $(params.dvc-known-false-positives-version) + - name: use-known-false-positive-file + value: $(params.use-known-false-positive-file) - name: poll-batch-status taskRef: diff --git a/deploy/tekton/eventlistener/call-orchestrator-api.yaml b/deploy/tekton/eventlistener/call-orchestrator-api.yaml index d1c5e50b..3484a72d 100644 --- a/deploy/tekton/eventlistener/call-orchestrator-api.yaml +++ b/deploy/tekton/eventlistener/call-orchestrator-api.yaml @@ -13,40 +13,33 @@ spec: testing and benchmark automation. params: - - name: batch-sheet-url - type: string - description: "Google Sheet URL containing the list of packages to analyze" - - name: submitted-by type: string description: "Identifier of who/what triggered this batch" default: "tekton-eventlistener-mlop" - # MLOps-specific parameters - - name: dvc-repo-url + - name: image-version type: string - description: "DVC repository URL for data versioning" - default: "" + description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" + default: "latest" - - name: dvc-data-version + # DVC version parameters (required) + - name: dvc-nvr-version type: string - description: "DVC data version tag" - default: "" + description: "DVC NVR resource version" - - name: s3-endpoint-url + - name: dvc-prompts-version type: string - description: "S3 endpoint URL for MLOps data" - default: "" + description: "DVC prompts resource version" - - name: s3-input-bucket-name + - name: dvc-known-false-positives-version type: string - description: "S3 input bucket name" - default: "" + description: "DVC known false positives resource version" - - name: image-version + - name: use-known-false-positive-file type: string - description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" - default: "" + description: "Whether to use known false positive file" + default: "true" results: - name: batch-id @@ -86,13 +79,12 @@ spec: echo "Configuration:" echo " Orchestrator URL: $ORCHESTRATOR_URL" echo " API Endpoint: 
$API_ENDPOINT" - echo " Batch Sheet URL: $(params.batch-sheet-url)" echo " Submitted By: $(params.submitted-by)" - echo " DVC Repo: $(params.dvc-repo-url)" - echo " DVC Version: $(params.dvc-data-version)" - echo " S3 Endpoint: $(params.s3-endpoint-url)" - echo " S3 Bucket: $(params.s3-input-bucket-name)" echo " Image Version: $(params.image-version)" + echo " DVC NVR Version: $(params.dvc-nvr-version)" + echo " DVC Prompts Version: $(params.dvc-prompts-version)" + echo " DVC Known False Positives Version: $(params.dvc-known-false-positives-version)" + echo " Use Known False Positive File: $(params.use-known-false-positive-file)" echo "" # Construct full API URL by concatenating base URL with endpoint @@ -104,14 +96,12 @@ spec: # Note: API expects camelCase field names PAYLOAD=$(cat </dev/null 2>&1 +} + +# Function to check prerequisites +check_prerequisites() { + echo -e "${YELLOW}Checking prerequisites...${NC}" + + # Check for oc or kubectl + if command_exists oc; then + KUBECTL="oc" + echo -e "${GREEN}โœ“${NC} Found oc CLI" + elif command_exists kubectl; then + KUBECTL="kubectl" + echo -e "${GREEN}โœ“${NC} Found kubectl CLI" + else + echo -e "${RED}โœ—${NC} Neither oc nor kubectl found. Please install OpenShift or Kubernetes CLI." + exit 1 + fi + + # Check for curl + if ! command_exists curl; then + echo -e "${RED}โœ—${NC} curl not found. Please install curl." 
+ exit 1 + fi + echo -e "${GREEN}โœ“${NC} Found curl" + + # Check for jq (optional) + if command_exists jq; then + echo -e "${GREEN}โœ“${NC} Found jq (for JSON parsing)" + HAS_JQ=true + else + echo -e "${YELLOW}โš ${NC} jq not found (optional, for pretty JSON output)" + HAS_JQ=false + fi + + # Check for tkn (optional) + if command_exists tkn; then + echo -e "${GREEN}โœ“${NC} Found tkn CLI (for watching PipelineRuns)" + HAS_TKN=true + else + echo -e "${YELLOW}โš ${NC} tkn CLI not found (optional, for easier pipeline monitoring)" + HAS_TKN=false + fi + + echo "" +} + +# Function to check if resources are deployed +check_deployment() { + echo -e "${YELLOW}Checking if EventListener resources are deployed...${NC}" + + # Check ConfigMap + if $KUBECTL get configmap benchmark-config -n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${GREEN}โœ“${NC} ConfigMap 'benchmark-config' exists" + else + echo -e "${RED}โœ—${NC} ConfigMap 'benchmark-config' not found" + echo "" + echo "Please deploy the EventListener resources first:" + echo " cd deploy" + echo " make eventlistener ORCHESTRATOR_API_URL= NAMESPACE=$NAMESPACE" + exit 1 + fi + + # Check Tasks + if $KUBECTL get task call-orchestrator-api-mlop -n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${GREEN}โœ“${NC} Task 'call-orchestrator-api-mlop' exists" + else + echo -e "${RED}โœ—${NC} Task 'call-orchestrator-api-mlop' not found" + exit 1 + fi + + if $KUBECTL get task poll-batch-status-mlop -n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${GREEN}โœ“${NC} Task 'poll-batch-status-mlop' exists" + else + echo -e "${RED}โœ—${NC} Task 'poll-batch-status-mlop' not found" + exit 1 + fi + + # Check Pipeline + if $KUBECTL get pipeline benchmark-mlop-pipeline -n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${GREEN}โœ“${NC} Pipeline 'benchmark-mlop-pipeline' exists" + else + echo -e "${RED}โœ—${NC} Pipeline 'benchmark-mlop-pipeline' not found" + exit 1 + fi + + # Check EventListener + if $KUBECTL get eventlistener benchmark-mlop-listener 
-n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${GREEN}โœ“${NC} EventListener 'benchmark-mlop-listener' exists" + else + echo -e "${RED}โœ—${NC} EventListener 'benchmark-mlop-listener' not found" + exit 1 + fi + + # Check Service + if $KUBECTL get service "$SERVICE_NAME" -n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${GREEN}โœ“${NC} Service '$SERVICE_NAME' exists" + else + echo -e "${RED}โœ—${NC} Service '$SERVICE_NAME' not found" + exit 1 + fi + + # Check if EventListener pod is running + POD_NAME=$($KUBECTL get pods -n "$NAMESPACE" -l eventlistener=benchmark-mlop-listener -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "") + if [ -n "$POD_NAME" ]; then + POD_STATUS=$($KUBECTL get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath='{.status.phase}') + if [ "$POD_STATUS" = "Running" ]; then + echo -e "${GREEN}โœ“${NC} EventListener pod is running: $POD_NAME" + else + echo -e "${YELLOW}โš ${NC} EventListener pod status: $POD_STATUS" + fi + else + echo -e "${YELLOW}โš ${NC} EventListener pod not found (may still be starting)" + fi + + echo "" +} + +# Function to display ConfigMap configuration +show_config() { + echo -e "${YELLOW}Current Configuration:${NC}" + $KUBECTL get configmap benchmark-config -n "$NAMESPACE" -o jsonpath='{.data}' | grep -o '"[^"]*"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/^/ /' || echo " (unable to read)" + echo "" +} + +# Function to start port-forward +start_port_forward() { + echo -e "${YELLOW}Starting port-forward to EventListener...${NC}" + echo "This will forward localhost:$LOCAL_PORT -> $SERVICE_NAME:8080" + echo "" + echo -e "${BLUE}Port-forward command:${NC}" + echo " $KUBECTL port-forward svc/$SERVICE_NAME $LOCAL_PORT:8080 -n $NAMESPACE" + echo "" + echo -e "${YELLOW}Note: Keep this terminal open. 
Use another terminal to send test requests.${NC}" + echo "" + + # Start port-forward + $KUBECTL port-forward "svc/$SERVICE_NAME" "$LOCAL_PORT:8080" -n "$NAMESPACE" +} + +# Function to send test request +send_test_request() { + echo -e "${YELLOW}Sending test request to EventListener...${NC}" + + # Generate timestamp for unique identification + TIMESTAMP=$(date +%Y%m%d-%H%M%S) + + # Prepare payload with all MLOps parameters (using underscores to match TriggerBinding) + PAYLOAD=$(cat <&1) + + # Parse response + HTTP_STATUS=$(echo "$RESPONSE" | grep "HTTP_STATUS:" | cut -d':' -f2) + BODY=$(echo "$RESPONSE" | sed '/HTTP_STATUS:/d') + + echo "Response Status: $HTTP_STATUS" + echo "Response Body:" + if [ "$HAS_JQ" = true ] && echo "$BODY" | jq '.' >/dev/null 2>&1; then + echo "$BODY" | jq '.' + else + echo "$BODY" + fi + echo "" + + if [ "$HTTP_STATUS" -ge 200 ] && [ "$HTTP_STATUS" -lt 300 ]; then + echo -e "${GREEN}โœ“ EventListener accepted the request!${NC}" + echo "" + echo "A PipelineRun should have been created. 
Check with:" + if [ "$HAS_TKN" = true ]; then + echo " tkn pipelinerun list -n $NAMESPACE" + echo " tkn pipelinerun logs -L -f -n $NAMESPACE" + else + echo " $KUBECTL get pipelinerun -n $NAMESPACE" + echo " $KUBECTL get pipelinerun -n $NAMESPACE -l app.kubernetes.io/component=benchmark-mlop" + fi + else + echo -e "${RED}โœ— EventListener returned an error${NC}" + echo "Check EventListener logs:" + echo " $KUBECTL logs -l eventlistener=benchmark-mlop-listener -n $NAMESPACE" + fi + echo "" +} + +# Function to watch pipeline runs +watch_pipelineruns() { + echo -e "${YELLOW}Watching recent benchmark PipelineRuns...${NC}" + echo "" + + if [ "$HAS_TKN" = true ]; then + echo "Using tkn to watch PipelineRuns:" + tkn pipelinerun list -n "$NAMESPACE" -l app.kubernetes.io/component=benchmark-mlop + echo "" + echo "To follow logs of the latest run:" + echo " tkn pipelinerun logs -L -f -n $NAMESPACE" + else + echo "Recent benchmark PipelineRuns:" + $KUBECTL get pipelinerun -n "$NAMESPACE" -l app.kubernetes.io/component=benchmark-mlop --sort-by=.metadata.creationTimestamp + echo "" + echo "To view logs:" + echo " $KUBECTL logs -l tekton.dev/pipelineTask=call-orchestrator-api -n $NAMESPACE --tail=100" + fi + echo "" + + echo -e "${YELLOW}Query PipelineRuns by trigger source:${NC}" + echo " # Manual tests:" + echo " $KUBECTL get pr -n $NAMESPACE -l sast-ai.redhat.com/trigger-source=manual-test" + echo "" + echo " # ArgoCD triggers:" + echo " $KUBECTL get pr -n $NAMESPACE -l sast-ai.redhat.com/trigger-source=argocd" + echo "" + echo " # All benchmark runs:" + echo " $KUBECTL get pr -n $NAMESPACE -l app.kubernetes.io/component=benchmark-mlop" + echo "" + + echo -e "${YELLOW}To clean up test PipelineRuns:${NC}" + echo " cd ../.. 
&& make eventlistener-clean NAMESPACE=$NAMESPACE" + echo "" +} + +# Main menu +show_menu() { + echo -e "${BLUE}=========================================${NC}" + echo -e "${BLUE}What would you like to do?${NC}" + echo -e "${BLUE}=========================================${NC}" + echo "1. Check deployment and configuration" + echo "2. Start port-forward (keep terminal open)" + echo "3. Send test request (requires port-forward in another terminal)" + echo "4. Watch PipelineRuns" + echo "5. Show current configuration" + echo "6. Full test (port-forward in background, send request, watch)" + echo "0. Exit" + echo "" + read -p "Enter choice [0-6]: " choice + echo "" + + case $choice in + 1) + check_deployment + show_config + read -p "Press Enter to continue..." + show_menu + ;; + 2) + start_port_forward + ;; + 3) + send_test_request + read -p "Press Enter to continue..." + show_menu + ;; + 4) + watch_pipelineruns + read -p "Press Enter to continue..." + show_menu + ;; + 5) + show_config + read -p "Press Enter to continue..." + show_menu + ;; + 6) + check_deployment + show_config + echo -e "${YELLOW}Starting port-forward in background...${NC}" + $KUBECTL port-forward "svc/$SERVICE_NAME" "$LOCAL_PORT:8080" -n "$NAMESPACE" & + PF_PID=$! + sleep 3 + send_test_request + sleep 2 + watch_pipelineruns + kill $PF_PID 2>/dev/null || true + echo -e "${GREEN}Port-forward stopped${NC}" + ;; + 0) + echo "Exiting..." 
+ exit 0 + ;; + *) + echo -e "${RED}Invalid choice${NC}" + show_menu + ;; + esac +} + +# Main execution +check_prerequisites + +# If script is run with argument, execute that action directly +if [ $# -gt 0 ]; then + case "$1" in + check|status) + check_deployment + show_config + ;; + port-forward|pf) + check_deployment + start_port_forward + ;; + test|trigger) + send_test_request + ;; + watch|logs) + watch_pipelineruns + ;; + *) + echo "Usage: $0 [check|port-forward|test|watch]" + exit 1 + ;; + esac +else + # Interactive mode + check_deployment + show_menu +fi + diff --git a/deploy/tekton/eventlistener/triggerbinding.yaml b/deploy/tekton/eventlistener/triggerbinding.yaml index 226a39e4..fea5860e 100644 --- a/deploy/tekton/eventlistener/triggerbinding.yaml +++ b/deploy/tekton/eventlistener/triggerbinding.yaml @@ -8,38 +8,31 @@ metadata: app.kubernetes.io/component: benchmark-mlop spec: params: - # Extract batch sheet URL from webhook payload - # For ArgoCD triggers, this will be empty (sheet created by pipeline) - # For direct webhook triggers, this can be provided - - name: batch-sheet-url - value: $(body.batch_sheet_url) - # Extract submitter information # ArgoCD: "argocd-prod-sync" # Direct webhook: custom value - name: submitted-by value: $(body.submitted_by) - # Extract source to determine trigger type - # "argocd" = triggered by ArgoCD notification - # empty/other = direct webhook trigger + # Extract trigger source for tracking + # Examples: "argocd", "webhook", "manual-test", "jenkins", etc. 
- name: trigger-source - value: $(body.source) + value: $(body.trigger_source) - # MLOps-specific parameters (optional, for mlops-batches endpoint) - - name: dvc-repo-url - value: $(body.dvc_repo_url) + # Workflow image version for testing + - name: image-version + value: $(body.image_version) - - name: dvc-data-version - value: $(body.dvc_data_version) + # DVC version parameters (required) + - name: dvc-nvr-version + value: $(body.dvc_nvr_version) - - name: s3-endpoint-url - value: $(body.s3_endpoint_url) + - name: dvc-prompts-version + value: $(body.dvc_prompts_version) - - name: s3-input-bucket-name - value: $(body.s3_input_bucket_name) + - name: dvc-known-false-positives-version + value: $(body.dvc_known_false_positives_version) - # Optional: Override workflow version for testing - - name: image-version - value: $(body.image_version) + - name: use-known-false-positive-file + value: $(body.use_known_false_positive_file) diff --git a/deploy/tekton/eventlistener/triggertemplate.yaml b/deploy/tekton/eventlistener/triggertemplate.yaml index 5cd06112..7d6990b2 100644 --- a/deploy/tekton/eventlistener/triggertemplate.yaml +++ b/deploy/tekton/eventlistener/triggertemplate.yaml @@ -9,38 +9,31 @@ metadata: spec: params: # Parameters from TriggerBinding - - name: batch-sheet-url - description: "Google Sheet URL with package list" - default: "" - - name: submitted-by - description: "Trigger source" + description: "Trigger source identifier" default: "eventlistener-webhook" - name: trigger-source - description: "Source of the trigger (argocd, webhook, etc.)" + description: "Tool that triggered the EventListener (argocd, webhook, manual-test, etc.)" default: "webhook" - # MLOps-specific parameters - - name: dvc-repo-url - description: "DVC repository URL for data versioning" - default: "" + - name: image-version + description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" + default: "latest" - - name: dvc-data-version - description: "DVC data version tag" - 
default: "" + # DVC version parameters (required) + - name: dvc-nvr-version + description: "DVC NVR resource version" - - name: s3-endpoint-url - description: "S3 endpoint URL for MLOps data" - default: "" + - name: dvc-prompts-version + description: "DVC prompts resource version" - - name: s3-input-bucket-name - description: "S3 bucket name for input data" - default: "" + - name: dvc-known-false-positives-version + description: "DVC known false positives resource version" - - name: image-version - description: "Workflow image version for testing (e.g., v2.1.0, sha-abc123)" - default: "" + - name: use-known-false-positive-file + description: "Whether to use known false positive file" + default: "true" resourcetemplates: - apiVersion: tekton.dev/v1 @@ -52,37 +45,34 @@ spec: labels: app.kubernetes.io/name: sast-ai-workflow app.kubernetes.io/component: benchmark-mlop - sast-ai.redhat.com/trigger-type: webhook + sast-ai.redhat.com/trigger-source: $(tt.params.trigger-source) tekton.dev/pipeline: benchmark-mlop-pipeline + annotations: + sast-ai.redhat.com/submitted-by: $(tt.params.submitted-by) + sast-ai.redhat.com/trigger-source: $(tt.params.trigger-source) spec: pipelineRef: name: benchmark-mlop-pipeline params: # Pass parameters to pipeline - - name: batch-sheet-url - value: $(tt.params.batch-sheet-url) - - name: submitted-by value: $(tt.params.submitted-by) - - - name: trigger-source - value: $(tt.params.trigger-source) - - - name: dvc-repo-url - value: $(tt.params.dvc-repo-url) - - name: dvc-data-version - value: $(tt.params.dvc-data-version) + - name: image-version + value: $(tt.params.image-version) + + - name: dvc-nvr-version + value: $(tt.params.dvc-nvr-version) - - name: s3-endpoint-url - value: $(tt.params.s3-endpoint-url) + - name: dvc-prompts-version + value: $(tt.params.dvc-prompts-version) - - name: s3-input-bucket-name - value: $(tt.params.s3-input-bucket-name) + - name: dvc-known-false-positives-version + value: 
$(tt.params.dvc-known-false-positives-version) - - name: image-version - value: $(tt.params.image-version) + - name: use-known-false-positive-file + value: $(tt.params.use-known-false-positive-file) # Timeout for the entire pipeline timeouts: diff --git a/deploy/tekton/overlays/mlops/kustomization.yaml b/deploy/tekton/overlays/mlops/kustomization.yaml index 64befd36..544ad110 100644 --- a/deploy/tekton/overlays/mlops/kustomization.yaml +++ b/deploy/tekton/overlays/mlops/kustomization.yaml @@ -1,6 +1,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization +# Add suffix to create separate mlops pipeline without overriding base +nameSuffix: -mlops + resources: - ../../base From 96df8f56672eec5675ef8a832b145e1cb7b456b6 Mon Sep 17 00:00:00 2001 From: Yael Date: Wed, 5 Nov 2025 10:28:25 +0200 Subject: [PATCH 3/4] fix: remove duplicate lines from rebase and enhance parameter validation --- deploy/Makefile | 111 +----------------- .../eventlistener/call-orchestrator-api.yaml | 49 ++++++++ .../tekton/eventlistener/eventlistener.yaml | 17 +++ .../eventlistener/poll-batch-status.yaml | 57 +++++++++ .../eventlistener/test-eventlistener.sh | 4 +- .../tekton/eventlistener/triggertemplate.yaml | 2 +- 6 files changed, 127 insertions(+), 113 deletions(-) diff --git a/deploy/Makefile b/deploy/Makefile index dd882f26..caf7681e 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -22,7 +22,6 @@ PROJECT_VERSION ?= project-version DOWNLOAD_REPO ?= false REPO_REMOTE_URL ?= source/code/url -REPO_REMOTE_URL ?= source/code/url REPO_LOCAL_PATH ?= /path/to/repo INPUT_REPORT_FILE_PATH ?= http://<> @@ -56,10 +55,6 @@ S3_INPUT_BUCKET_NAME ?= test GITHUB_REPO_URL ?= https://github.com/RHEcosystemAppEng/sast-ai-workflow.git ARGOCD_NAMESPACE ?= sast-ai -# EventListener Configuration -ORCHESTRATOR_API_URL ?= -MLOPS_ORCHESTRATOR_API_URL ?= - # EventListener Configuration ORCHESTRATOR_API_URL ?= @@ -79,8 +74,6 @@ AWS_SECRET_ACCESS_KEY ?= "" S3_ENDPOINT_URL ?= "" .PHONY: deploy deploy-dev 
deploy-prod deploy-mlops setup tasks-dev tasks-prod tasks-mlops secrets pipeline scripts configmaps run clean generate-prompts prompts argocd-deploy-dev argocd-deploy-prod argocd-clean eventlistener eventlistener-clean -.PHONY: deploy setup tasks-dev tasks-prod tasks-mlops secrets pipeline scripts configmaps run clean generate-prompts prompts argocd-deploy-prod argocd-clean eventlistener eventlistener-clean - # Unified deploy command # Usage: # make deploy # Deploy base (Google Drive, :latest) @@ -89,22 +82,7 @@ S3_ENDPOINT_URL ?= "" deploy: deploy-$(ENV) deploy-dev: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):latest -deploy-dev: setup-common tasks-dev argocd-deploy-dev - @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" - @echo "๐Ÿš€ SAST AI Workflow - Development Deployment" - @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" - @echo " Environment: Development" - @echo " Container Image: $(CONTAINER_IMAGE)" - @echo "" - @echo "โœ… Development deployment completed successfully!" 
- -deploy-prod: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):$(IMAGE_VERSION) -deploy-prod: setup tasks-prod argocd-deploy-prod - @if [ -z "$(IMAGE_VERSION)" ]; then \ -deploy: deploy-$(ENV) - -deploy-dev: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):latest -deploy-dev: setup tasks-dev +deploy-dev: setup tasks-dev argocd-deploy-dev @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo "๐Ÿš€ SAST AI Workflow - Development Deployment" @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @@ -127,17 +105,6 @@ deploy-prod: setup tasks-prod argocd-deploy-prod exit 1; \ fi -deploy-mlops: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):latest -deploy-mlops: setup tasks-mlops argocd-deploy-mlops - @if [ -z "$(MLOPS_ORCHESTRATOR_API_URL)" ]; then \ - echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \ - echo "โŒ ERROR: MLOPS_ORCHESTRATOR_API_URL is required for MLOps deployment"; \ - echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \ - echo ""; \ - echo "Usage: make deploy-mlops MLOPS_ORCHESTRATOR_API_URL="; \ - exit 1; \ - fi - deploy-mlops: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):latest deploy-mlops: setup tasks-mlops argocd-deploy-mlops @if [ -z "$(ORCHESTRATOR_API_URL)" ]; 
then \ @@ -148,27 +115,14 @@ deploy-mlops: setup tasks-mlops argocd-deploy-mlops echo "Usage: make deploy-mlops ORCHESTRATOR_API_URL="; \ echo ""; \ echo "Example:"; \ - echo " make deploy-mlops MLOPS_ORCHESTRATOR_API_URL=http://orchestrator.sast-ai.svc.cluster.local:8080"; \ - echo "Example:"; \ echo " make deploy-mlops ORCHESTRATOR_API_URL=http://orchestrator.sast-ai.svc.cluster.local:8080"; \ exit 1; \ fi @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo "๐Ÿค– SAST AI Workflow - MLOps Benchmarking Deployment" - @echo "๐Ÿค– SAST AI Workflow - MLOps Benchmarking Deployment" @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo " Environment: MLOps (Benchmarking)" @echo " Container Image: $(CONTAINER_IMAGE)" - @echo " Orchestrator URL: $(MLOPS_ORCHESTRATOR_API_URL)" - @echo "" - @echo "๐ŸŽฏ Deploying EventListener..." - @sed -e 's|ORCHESTRATOR_API_URL_PLACEHOLDER|$(MLOPS_ORCHESTRATOR_API_URL)|g' \ - tekton/eventlistener/benchmark-config.yaml.example > tekton/eventlistener/benchmark-config.yaml - @$(CO) apply -k tekton/eventlistener/ -n $(NAMESPACE) || \ - { echo " โŒ Failed to deploy EventListener resources"; exit 1; } - @echo " โœ“ EventListener deployed" - @echo " Environment: MLOps (Benchmarking)" - @echo " Container Image: $(CONTAINER_IMAGE)" @echo " Orchestrator URL: $(ORCHESTRATOR_API_URL)" @echo "" @echo "๐ŸŽฏ Deploying EventListener..." @@ -180,13 +134,9 @@ deploy-mlops: setup tasks-mlops argocd-deploy-mlops @echo "" @echo "โœ… MLOps deployment completed successfully!" 
-setup: secrets scripts prompts configmaps - @echo "โœ… MLOps deployment completed successfully!" - setup: secrets scripts prompts configmaps @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo "๐Ÿš€ Common Infrastructure Ready" - @echo "๐Ÿš€ Common Infrastructure Ready" @echo " Context: $(CONTEXT)" @echo " Namespace: $(NAMESPACE)" @@ -200,20 +150,6 @@ tasks-prod: @$(CO) apply -k tekton/overlays/prod -n $(NAMESPACE) @echo " โœ“ Production Tekton resources (versioned)" -tasks-mlops: - @echo "๐Ÿ“‹ Deploying Tekton resources (mlops)..." - @$(CO) apply -k tekton/overlays/mlops -n $(NAMESPACE) - @echo " โœ“ MLOps Tekton resources (MinIO/S3)" -tasks-dev: - @echo "๐Ÿ“‹ Deploying Tekton resources (dev)..." - @$(CO) apply -k tekton/base -n $(NAMESPACE) - @echo " โœ“ Base Tekton resources (base - Google Drive storage)" - -tasks-prod: - @echo "๐Ÿ“‹ Deploying Tekton resources (prod)..." - @$(CO) apply -k tekton/overlays/prod -n $(NAMESPACE) - @echo " โœ“ Production Tekton resources (versioned)" - tasks-mlops: @echo "๐Ÿ“‹ Deploying Tekton resources (mlops)..." @$(CO) apply -k tekton/overlays/mlops -n $(NAMESPACE) @@ -373,7 +309,6 @@ run: @echo " ๐Ÿ”„ Removing old pipeline runs..." 
@$(CO) delete pipelinerun sast-ai-workflow-pipelinerun \ -n $(NAMESPACE) --ignore-not-found - -n $(NAMESPACE) --ignore-not-found # Create PipelineRun with current parameters @sed \ -e 's|PROJECT_NAME_PLACEHOLDER|$(PROJECT_NAME)|g' \ @@ -481,46 +416,6 @@ eventlistener-clean: @$(CO) delete -k tekton/eventlistener/ -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true @echo " โœ“ EventListener resources removed" -eventlistener: - @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" - @echo "๐ŸŽฏ EventListener Standalone Update" - @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" - @echo " โš ๏ธ Use 'make deploy-mlops' for full deployment" - @echo "" - @if [ -z "$(ORCHESTRATOR_API_URL)" ]; then \ - echo "โŒ ERROR: ORCHESTRATOR_API_URL is required"; \ - echo ""; \ - echo "Usage:"; \ - echo " make eventlistener ORCHESTRATOR_API_URL="; \ - echo ""; \ - echo "Example:"; \ - echo " make eventlistener ORCHESTRATOR_API_URL=http://orchestrator.sast-ai.svc.cluster.local:8080"; \ - exit 1; \ - fi - @echo "๐ŸŽฏ Deploying EventListener..." 
- @sed -e 's|ORCHESTRATOR_API_URL_PLACEHOLDER|$(ORCHESTRATOR_API_URL)|g' \ - tekton/eventlistener/benchmark-config.yaml.example > tekton/eventlistener/benchmark-config.yaml - @$(CO) apply -k tekton/eventlistener/ -n $(NAMESPACE) || \ - { echo " โŒ Failed to deploy EventListener resources"; exit 1; } - @echo "" - @echo "โœ… EventListener updated" - @echo "" - @echo "๐Ÿ“Š Verify: oc get eventlistener,task,pipeline -l app.kubernetes.io/component=benchmark-mlop -n $(NAMESPACE)" - @echo "๐Ÿงช Test: cd tekton/eventlistener && ./test-eventlistener.sh" - @echo "" - -eventlistener-clean: - @echo "๐Ÿงน Removing EventListener resources..." - @echo " ๐Ÿƒ Cleaning benchmark PipelineRuns..." - @$(CO) delete pipelinerun -l app.kubernetes.io/component=benchmark-mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true - @echo " โœ“ Benchmark PipelineRuns removed" - @echo " ๐Ÿ“‹ Cleaning benchmark TaskRuns..." - @$(CO) delete taskrun -l app.kubernetes.io/component=benchmark-mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true - @echo " โœ“ Benchmark TaskRuns removed" - @echo " ๐Ÿ—‘๏ธ Removing EventListener infrastructure..." 
- @$(CO) delete -k tekton/eventlistener/ -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true - @echo " โœ“ EventListener resources removed" - clean: @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo "๐Ÿงน SAST AI Workflow - Cleanup" @@ -546,9 +441,6 @@ clean: elif [ "$(ENV)" = "mlop" ]; then \ $(CO) delete -k tekton/overlays/mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \ echo " โœ“ MLOp Tekton resources removed (kustomize overlay)"; \ - elif [ "$(ENV)" = "mlop" ]; then \ - $(CO) delete -k tekton/overlays/mlop -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \ - echo " โœ“ MLOp Tekton resources removed (kustomize overlay)"; \ else \ $(CO) delete -k tekton/base -n $(NAMESPACE) --ignore-not-found > /dev/null 2>&1 || true; \ echo " โœ“ Base Tekton resources removed (kustomize base)"; \ @@ -609,7 +501,6 @@ clean: @$(CO) delete secret sast-ai-gitlab-token \ sast-ai-default-llm-creds \ sast-ai-google-service-account \ - sast-ai-google-service-account \ sast-ai-gcs-service-account \ sast-ai-s3-output-credentials \ sast-ai-quay-registry-config \ diff --git a/deploy/tekton/eventlistener/call-orchestrator-api.yaml b/deploy/tekton/eventlistener/call-orchestrator-api.yaml index 3484a72d..b3dad80c 100644 --- a/deploy/tekton/eventlistener/call-orchestrator-api.yaml +++ b/deploy/tekton/eventlistener/call-orchestrator-api.yaml @@ -87,6 +87,55 @@ spec: echo " Use Known False Positive File: $(params.use-known-false-positive-file)" echo "" + # Validate required parameters + echo "Validating parameters..." 
+ VALIDATION_FAILED=0 + + if [ -z "$ORCHESTRATOR_URL" ]; then + echo "ERROR: ORCHESTRATOR_URL is empty or not set" + echo " Check that ConfigMap 'benchmark-config' exists and has key 'orchestrator-api-url'" + VALIDATION_FAILED=1 + fi + + if [ -z "$API_ENDPOINT" ]; then + echo "ERROR: API_ENDPOINT is empty or not set" + echo " Check that ConfigMap 'benchmark-config' exists and has key 'api-batch-endpoint'" + VALIDATION_FAILED=1 + fi + + if [ -z "$(params.dvc-nvr-version)" ]; then + echo "ERROR: dvc-nvr-version parameter is required but empty" + VALIDATION_FAILED=1 + fi + + if [ -z "$(params.dvc-prompts-version)" ]; then + echo "ERROR: dvc-prompts-version parameter is required but empty" + VALIDATION_FAILED=1 + fi + + if [ -z "$(params.dvc-known-false-positives-version)" ]; then + echo "ERROR: dvc-known-false-positives-version parameter is required but empty" + VALIDATION_FAILED=1 + fi + + # Validate use-known-false-positive-file is a valid boolean + USE_KFP="$(params.use-known-false-positive-file)" + if [ "$USE_KFP" != "true" ] && [ "$USE_KFP" != "false" ]; then + echo "ERROR: use-known-false-positive-file must be 'true' or 'false', got: $USE_KFP" + VALIDATION_FAILED=1 + fi + + if [ $VALIDATION_FAILED -eq 1 ]; then + echo "" + echo "Parameter validation failed. Cannot proceed." 
+ echo -n "failed" > $(results.status.path) + echo -n "error" > $(results.batch-id.path) + exit 1 + fi + + echo "โœ“ All parameters validated successfully" + echo "" + # Construct full API URL by concatenating base URL with endpoint FULL_API_URL="${ORCHESTRATOR_URL}${API_ENDPOINT}" echo "Full API URL: $FULL_API_URL" diff --git a/deploy/tekton/eventlistener/eventlistener.yaml b/deploy/tekton/eventlistener/eventlistener.yaml index c799bcd2..88c3dd69 100644 --- a/deploy/tekton/eventlistener/eventlistener.yaml +++ b/deploy/tekton/eventlistener/eventlistener.yaml @@ -10,6 +10,23 @@ spec: serviceAccountName: pipeline triggers: - name: benchmark-mlop-trigger + interceptors: + # CEL Interceptor for validating required parameters + # Fails fast before creating PipelineRun if validation fails + - name: validate-required-params + ref: + name: cel + params: + - name: filter + value: >- + has(body.dvc_nvr_version) && body.dvc_nvr_version != '' && + has(body.dvc_prompts_version) && body.dvc_prompts_version != '' && + has(body.dvc_known_false_positives_version) && body.dvc_known_false_positives_version != '' + - name: overlays + value: + # Add validation status for debugging + - key: validation.passed + expression: "true" bindings: - ref: benchmark-mlop-binding template: diff --git a/deploy/tekton/eventlistener/poll-batch-status.yaml b/deploy/tekton/eventlistener/poll-batch-status.yaml index 6a4e5435..cc00b74f 100644 --- a/deploy/tekton/eventlistener/poll-batch-status.yaml +++ b/deploy/tekton/eventlistener/poll-batch-status.yaml @@ -81,6 +81,63 @@ spec: echo " Timeout: ${TIMEOUT_MINUTES} minutes" echo "" + # Validate required parameters + echo "Validating parameters..." 
+ VALIDATION_FAILED=0 + + if [ -z "$ORCHESTRATOR_URL" ]; then + echo "ERROR: ORCHESTRATOR_URL is empty or not set" + echo " Check that ConfigMap 'benchmark-config' exists and has key 'orchestrator-api-url'" + VALIDATION_FAILED=1 + fi + + if [ -z "$API_ENDPOINT" ]; then + echo "ERROR: API_ENDPOINT is empty or not set" + echo " Check that ConfigMap 'benchmark-config' exists and has key 'api-batch-endpoint'" + VALIDATION_FAILED=1 + fi + + if [ -z "$BATCH_ID" ]; then + echo "ERROR: batch-id parameter is empty or not set" + echo " This parameter should be passed from the previous task" + VALIDATION_FAILED=1 + elif [ "$BATCH_ID" = "error" ] || [ "$BATCH_ID" = "unknown" ]; then + echo "ERROR: Invalid batch-id: $BATCH_ID" + echo " The previous task may have failed to create the batch" + VALIDATION_FAILED=1 + fi + + # Validate poll-interval is a positive integer + if ! echo "$POLL_INTERVAL" | grep -qE '^[0-9]+$'; then + echo "ERROR: poll-interval must be a positive integer, got: $POLL_INTERVAL" + VALIDATION_FAILED=1 + elif [ "$POLL_INTERVAL" -le 0 ]; then + echo "ERROR: poll-interval must be greater than 0, got: $POLL_INTERVAL" + VALIDATION_FAILED=1 + fi + + # Validate timeout is a positive integer + if ! echo "$TIMEOUT_MINUTES" | grep -qE '^[0-9]+$'; then + echo "ERROR: timeout must be a positive integer, got: $TIMEOUT_MINUTES" + VALIDATION_FAILED=1 + elif [ "$TIMEOUT_MINUTES" -le 0 ]; then + echo "ERROR: timeout must be greater than 0, got: $TIMEOUT_MINUTES" + VALIDATION_FAILED=1 + fi + + if [ $VALIDATION_FAILED -eq 1 ]; then + echo "" + echo "Parameter validation failed. Cannot proceed." 
+ echo -n "VALIDATION_FAILED" > $(results.final-status.path) + echo -n "0" > $(results.total-jobs.path) + echo -n "0" > $(results.completed-jobs.path) + echo -n "0" > $(results.failed-jobs.path) + exit 1 + fi + + echo "โœ“ All parameters validated successfully" + echo "" + # Calculate timeout in seconds TIMEOUT_SECONDS=$((TIMEOUT_MINUTES * 60)) START_TIME=$(date +%s) diff --git a/deploy/tekton/eventlistener/test-eventlistener.sh b/deploy/tekton/eventlistener/test-eventlistener.sh index 1cc6714a..8832bc49 100755 --- a/deploy/tekton/eventlistener/test-eventlistener.sh +++ b/deploy/tekton/eventlistener/test-eventlistener.sh @@ -30,7 +30,7 @@ echo " TRIGGER_SOURCE: ${TRIGGER_SOURCE:-manual-test} (argocd, webhook, jenkins echo " IMAGE_VERSION: ${IMAGE_VERSION:-latest}" echo " DVC_NVR_VERSION: ${DVC_NVR_VERSION:-(empty)}" echo " DVC_PROMPTS_VERSION: ${DVC_PROMPTS_VERSION:-(empty)}" -echo " DVC_KFP_VERSION: ${DVC_KFP_VERSION:-(empty)}" +echo " DVC_KNOWN_FALSE_POSITIVES_VERSION: ${DVC_KNOWN_FALSE_POSITIVES_VERSION:-(empty)}" echo " USE_KNOWN_FP: ${USE_KNOWN_FP:-true}" echo "" @@ -191,7 +191,7 @@ send_test_request() { "image_version": "${IMAGE_VERSION:-latest}", "dvc_nvr_version": "${DVC_NVR_VERSION:-v1}", "dvc_prompts_version": "${DVC_PROMPTS_VERSION:-v1}", - "dvc_known_false_positives_version": "${DVC_KFP_VERSION:-v1}", + "dvc_known_false_positives_version": "${DVC_KNOWN_FALSE_POSITIVES_VERSION:-v1}", "use_known_false_positive_file": ${USE_KNOWN_FP:-true} } EOF diff --git a/deploy/tekton/eventlistener/triggertemplate.yaml b/deploy/tekton/eventlistener/triggertemplate.yaml index 7d6990b2..4aedaa7c 100644 --- a/deploy/tekton/eventlistener/triggertemplate.yaml +++ b/deploy/tekton/eventlistener/triggertemplate.yaml @@ -76,5 +76,5 @@ spec: # Timeout for the entire pipeline timeouts: - pipeline: "4h" + pipeline: "2h" From bbbebf5ce9a8641c266aa9e2ac2e1c38f0089186 Mon Sep 17 00:00:00 2001 From: Yael Date: Wed, 5 Nov 2025 12:21:05 +0200 Subject: [PATCH 4/4] fix: Replace manual 
orchestrator URL requirement with K8s service DNS --- .gitignore | 3 + deploy/Makefile | 33 ++--- deploy/tekton/eventlistener/README.md | 118 ++++++++++++------ .../eventlistener/benchmark-config.yaml | 33 ----- .../benchmark-config.yaml.template | 30 +++++ .../tekton/eventlistener/kustomization.yaml | 6 +- .../eventlistener/test-eventlistener.sh | 2 +- 7 files changed, 123 insertions(+), 102 deletions(-) delete mode 100644 deploy/tekton/eventlistener/benchmark-config.yaml create mode 100644 deploy/tekton/eventlistener/benchmark-config.yaml.template diff --git a/.gitignore b/.gitignore index 3b56fe43..f71eaa6e 100644 --- a/.gitignore +++ b/.gitignore @@ -169,3 +169,6 @@ cython_debug/ evaluation/dataset/* evaluation/reports/* evaluation/known_non_issues_data/* + +# Tekton EventListener generated config +deploy/tekton/eventlistener/benchmark-config.yaml diff --git a/deploy/Makefile b/deploy/Makefile index caf7681e..0a0fbf23 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -56,7 +56,8 @@ GITHUB_REPO_URL ?= https://github.com/RHEcosystemAppEng/sast-ai ARGOCD_NAMESPACE ?= sast-ai # EventListener Configuration -ORCHESTRATOR_API_URL ?= +# Default uses K8s service DNS with namespace parameter +ORCHESTRATOR_API_URL ?= http://sast-ai-orchestrator.$(NAMESPACE).svc.cluster.local:80 # Secret configuration (loaded from .env file) GITLAB_TOKEN ?= "" @@ -107,21 +108,12 @@ deploy-prod: setup tasks-prod argocd-deploy-prod deploy-mlops: CONTAINER_IMAGE=$(IMAGE_REGISTRY)/$(IMAGE_NAME):latest deploy-mlops: setup tasks-mlops argocd-deploy-mlops - @if [ -z "$(ORCHESTRATOR_API_URL)" ]; then \ - echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \ - echo "โŒ ERROR: ORCHESTRATOR_API_URL is required for MLOps deployment"; \ - echo 
"โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”"; \ - echo ""; \ - echo "Usage: make deploy-mlops ORCHESTRATOR_API_URL="; \ - echo ""; \ - echo "Example:"; \ - echo " make deploy-mlops ORCHESTRATOR_API_URL=http://orchestrator.sast-ai.svc.cluster.local:8080"; \ - exit 1; \ - fi + @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo "๐Ÿค– SAST AI Workflow - MLOps Benchmarking Deployment" @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo " Environment: MLOps (Benchmarking)" + @echo " Namespace: $(NAMESPACE)" @echo " Container Image: $(CONTAINER_IMAGE)" @echo " Orchestrator URL: $(ORCHESTRATOR_API_URL)" @echo "" @@ -382,19 +374,12 @@ eventlistener: @echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" @echo " โš ๏ธ Use 'make deploy-mlops' for full deployment" @echo "" - @if [ -z "$(ORCHESTRATOR_API_URL)" ]; then \ - echo "โŒ ERROR: ORCHESTRATOR_API_URL is required"; \ - echo ""; \ - echo "Usage:"; \ - echo " make eventlistener ORCHESTRATOR_API_URL="; \ - echo ""; \ - echo "Example:"; \ - echo " make eventlistener ORCHESTRATOR_API_URL=http://orchestrator.sast-ai.svc.cluster.local:8080"; \ - exit 1; \ - fi + @echo "Using namespace: $(NAMESPACE)" + @echo "Using orchestrator URL: 
$(ORCHESTRATOR_API_URL)" @echo "" @echo "🎯 Deploying EventListener..." - @sed -e 's|ORCHESTRATOR_API_URL_PLACEHOLDER|$(ORCHESTRATOR_API_URL)|g' \ - tekton/eventlistener/benchmark-config.yaml.example > tekton/eventlistener/benchmark-config.yaml + @sed -e 's|<NAMESPACE>|$(NAMESPACE)|g' \ + tekton/eventlistener/benchmark-config.yaml.template > tekton/eventlistener/benchmark-config.yaml @$(CO) apply -k tekton/eventlistener/ -n $(NAMESPACE) || \ { echo " ❌ Failed to deploy EventListener resources"; exit 1; } @echo "" diff --git a/deploy/tekton/eventlistener/README.md b/deploy/tekton/eventlistener/README.md index 99f5251e..a11d8470 100644 --- a/deploy/tekton/eventlistener/README.md +++ b/deploy/tekton/eventlistener/README.md @@ -18,8 +18,8 @@ Enable MLOps benchmark testing for batch SAST analysis jobs: eventlistener/ ├── README.md # This file ├── kustomization.yaml # Kustomize configuration -├── benchmark-config.yaml # ConfigMap (generated by make eventlistener) -├── benchmark-config.yaml.example # Template (optional reference) +├── benchmark-config.yaml.template # ConfigMap template +├── benchmark-config.yaml # Generated ConfigMap (git-ignored) ├── call-orchestrator-api.yaml # Task that calls orchestrator MLOps API ├── poll-batch-status.yaml # Task that monitors batch completion ├── benchmark-pipeline.yaml # MLOps benchmark pipeline @@ -29,7 +29,7 @@ eventlistener/ └── test-eventlistener.sh # Helper script for testing ``` -**Note:** `benchmark-config.yaml` is automatically generated when you run `make eventlistener` with the required parameters. +**Note:** `benchmark-config.yaml` is automatically generated from `benchmark-config.yaml.template` when you run `make eventlistener` and is git-ignored. 
## ๐Ÿ“‹ Prerequisites @@ -57,31 +57,54 @@ cd deploy make tasks ENV=mlop NAMESPACE=your-namespace ``` -### Step 2: Find Your Orchestrator URL +### Step 2: Deploy EventListener -```bash -# Find orchestrator service -oc get svc -l app=sast-ai-orchestrator -n your-namespace +Deploy the EventListener (uses defaults for both namespace and URL): -# Typical format: -# http://..svc.cluster.local: -# Example: http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 +```bash +cd deploy +make eventlistener ``` -### Step 3: Deploy EventListener - -Deploy the EventListener with required parameters: +**Default Configuration:** +- Namespace: Auto-detected from current `oc` context +- Orchestrator URL: `http://sast-ai-orchestrator..svc.cluster.local:80` +- Uses existing orchestrator service (matches Helm deployment) +- Uses automatic K8s service discovery +- No manual configuration needed +**Override Options:** ```bash -cd deploy +# Override namespace only +make eventlistener NAMESPACE=custom-namespace + +# Override orchestrator URL only +make eventlistener ORCHESTRATOR_API_URL=http://custom-service.sast-ai.svc.cluster.local:8080 + +# Override both make eventlistener \ - ORCHESTRATOR_API_URL=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 \ - NAMESPACE=your-namespace + ORCHESTRATOR_API_URL=http://custom-service.custom-ns.svc.cluster.local:8080 \ + NAMESPACE=custom-ns ``` -**Required Parameters:** -- `ORCHESTRATOR_API_URL` - Orchestrator service URL (cluster-internal) -- `NAMESPACE` - Target namespace +**Parameters:** +- `NAMESPACE` - Target namespace (optional, auto-detected from current context) +- `ORCHESTRATOR_API_URL` - Orchestrator service URL (optional, uses K8s service DNS default) + +### Step 3: Verify Orchestrator Service + +The workflow uses the orchestrator's existing Helm service. 
+ +**Quick Verification:** +```bash +# Verify orchestrator service exists +oc get svc sast-ai-orchestrator -n your-namespace +``` +**Expected Service Configuration:** +- **Name**: `sast-ai-orchestrator` (from orchestrator's Helm chart) +- **Port**: 80 (maps to targetPort 8080) +- **Type**: ClusterIP +- **Endpoints**: Should show pod IP:8080 **What happens:** - ✅ Validates required parameters @@ -223,11 +246,16 @@ oc logs -l tekton.dev/pipelineTask=call-orchestrator-api --tail=100 # View current configuration oc get configmap benchmark-config -o yaml -n your-namespace -# Update if needed - regenerate with new parameters +# Update if needed - regenerate (uses current namespace by default) cd deploy +make eventlistener + +# Or override namespace +make eventlistener NAMESPACE=your-namespace + +# Or with custom orchestrator URL make eventlistener \ - ORCHESTRATOR_API_URL= \ - NAMESPACE=your-namespace + ORCHESTRATOR_API_URL=http://custom-orchestrator-service.your-namespace.svc.cluster.local:8080 ``` ## 🔧 Configuration Reference @@ -261,7 +289,7 @@ The `benchmark-config` ConfigMap is automatically generated by `make eventlistener... | Key | Description | Example | |-----|-------------|---------| -| `orchestrator-api-url` | Base URL of orchestrator service | `http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080` | +| `orchestrator-api-url` | Base URL of orchestrator service | `http://sast-ai-orchestrator.<namespace>.svc.cluster.local:80` | | `api-batch-endpoint` | API endpoint path for MLOps batches | `/api/v1/mlops-batches` | **Note:** The `api-batch-endpoint` is automatically set to `/api/v1/mlops-batches` for MLOps benchmarking. 
@@ -361,23 +389,22 @@ For production use, consider: Deploy to dedicated namespace: ```bash -# Create namespace +# Create and switch to namespace oc new-project sast-ai-benchmark -# Find orchestrator URL -ORCH_URL=$(oc get svc -l app=sast-ai-orchestrator -n sast-ai-benchmark -o jsonpath='{.items[0].metadata.name}') -ORCH_PORT=$(oc get svc -l app=sast-ai-orchestrator -n sast-ai-benchmark -o jsonpath='{.items[0].spec.ports[0].port}') - -# Deploy MLOps pipeline overlay +# Deploy MLOps pipeline overlay (uses current namespace) cd deploy -make tasks ENV=mlop NAMESPACE=sast-ai-benchmark +make tasks ENV=mlop -# Deploy EventListener -make eventlistener \ - ORCHESTRATOR_API_URL=http://${ORCH_URL}.sast-ai-benchmark.svc.cluster.local:${ORCH_PORT} \ - NAMESPACE=sast-ai-benchmark +# Deploy EventListener (auto-detects namespace from context) +make eventlistener + +# Verify orchestrator service exists (from orchestrator's Helm deployment) +oc get svc sast-ai-orchestrator -n sast-ai-benchmark ``` +**Note:** The default configuration auto-detects the current namespace and uses `http://sast-ai-orchestrator.<namespace>.svc.cluster.local:80` (matches the orchestrator's existing Helm service). + This creates both: - The `mlop-sast-ai-workflow-pipeline` that the orchestrator will trigger - The EventListener webhook endpoint for triggering benchmarks @@ -411,15 +438,24 @@ oc delete service el-benchmark-mlop-listener -n your-namespace If you're using this project as a base for your own: -1. **Deploy** with your parameters: +1. **Switch to your namespace and deploy** (auto-detects namespace): ```bash - make eventlistener \ - ORCHESTRATOR_API_URL= \ - NAMESPACE= + oc project <your-namespace> + cd deploy + make eventlistener ``` -2. **Customize** labels and naming if needed (edit YAML files in `tekton/eventlistener/`) -3. **Test** with your orchestrator instance using `test-eventlistener.sh` -4. **Extend** pipeline with your specific requirements + +2. 
**Ensure orchestrator service** is deployed: + ```bash + oc get svc sast-ai-orchestrator -n <your-namespace> + + # Should show port 80 -> targetPort 8080 + # The workflow will use: http://sast-ai-orchestrator.<namespace>.svc.cluster.local:80 + ``` + +3. **Customize** labels and naming if needed (edit YAML files in `tekton/eventlistener/`) +4. **Test** with your orchestrator instance using `test-eventlistener.sh` +5. **Extend** pipeline with your specific requirements All configuration is passed as parameters - no manual file editing needed! diff --git a/deploy/tekton/eventlistener/benchmark-config.yaml b/deploy/tekton/eventlistener/benchmark-config.yaml deleted file mode 100644 index 71f11281..00000000 --- a/deploy/tekton/eventlistener/benchmark-config.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# MLOps Benchmark Configuration Example -# -# This is an EXAMPLE file for reference only. -# The actual benchmark-config.yaml is automatically generated by the Makefile. -# -# Recommended deployment method: -# cd deploy -# make eventlistener \ -# ORCHESTRATOR_API_URL=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 \ -# NAMESPACE=your-namespace -# -# Finding your orchestrator URL: -# oc get svc -l app=sast-ai-orchestrator -# -# Note: The Google Sheet URL is provided via webhook payload when triggering, -# not in this ConfigMap. 
-# ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: benchmark-config - labels: - app.kubernetes.io/name: sast-ai-workflow - app.kubernetes.io/component: benchmark-mlop -data: - # Orchestrator API base URL (cluster-internal service) - # This will be replaced by the Makefile with your actual orchestrator URL - orchestrator-api-url: "http://sast-ai-orchestrator" - - # API endpoint path for MLOps batches - api-batch-endpoint: "/api/v1/mlops-batches" - diff --git a/deploy/tekton/eventlistener/benchmark-config.yaml.template b/deploy/tekton/eventlistener/benchmark-config.yaml.template new file mode 100644 index 00000000..1e65f3b0 --- /dev/null +++ b/deploy/tekton/eventlistener/benchmark-config.yaml.template @@ -0,0 +1,30 @@ +# MLOps Benchmark Configuration Template +# +# This is a TEMPLATE file. The Makefile generates benchmark-config.yaml from this template. +# The <NAMESPACE> placeholder will be replaced with the actual namespace during deployment. +# +# Default K8s Service Pattern: +# Uses existing orchestrator service: http://sast-ai-orchestrator.<NAMESPACE>.svc.cluster.local:80 +# +# Matches the orchestrator's existing Helm service: +# - Service name: sast-ai-orchestrator +# - Port: 80 (service port) -> 8080 (target port) +# - Type: ClusterIP +# +# Note: The generated benchmark-config.yaml is git-ignored and should not be committed. 
+--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: benchmark-config + labels: + app.kubernetes.io/name: sast-ai-workflow + app.kubernetes.io/component: benchmark-mlop +data: + # Orchestrator API base URL (cluster-internal service) + # Default: Uses existing orchestrator service (sast-ai-orchestrator:80) + orchestrator-api-url: "http://sast-ai-orchestrator.<NAMESPACE>.svc.cluster.local:80" + + # API endpoint path for MLOps batches + api-batch-endpoint: "/api/v1/mlops-batches" + diff --git a/deploy/tekton/eventlistener/kustomization.yaml b/deploy/tekton/eventlistener/kustomization.yaml index a42d73bc..2334d4b0 100644 --- a/deploy/tekton/eventlistener/kustomization.yaml +++ b/deploy/tekton/eventlistener/kustomization.yaml @@ -23,10 +23,10 @@ resources: # ConfigMap generator (alternative to static file) # Uncomment to generate ConfigMap from properties +# Note: Use K8s service DNS pattern for orchestrator URL # configMapGenerator: # - name: benchmark-config # literals: -# - orchestrator-api-url=http://sast-ai-orchestrator.sast-ai.svc.cluster.local:8080 -# - default-batch-sheet-url=https://docs.google.com/spreadsheets/d/YOUR_SHEET_ID/edit -# - api-batch-endpoint=/api/v1/job-batches +# - orchestrator-api-url=http://sast-ai-orchestrator.<namespace>.svc.cluster.local:80 +# - api-batch-endpoint=/api/v1/mlops-batches diff --git a/deploy/tekton/eventlistener/test-eventlistener.sh b/deploy/tekton/eventlistener/test-eventlistener.sh index 8832bc49..8d5902c0 100755 --- a/deploy/tekton/eventlistener/test-eventlistener.sh +++ b/deploy/tekton/eventlistener/test-eventlistener.sh @@ -95,7 +95,7 @@ check_deployment() { echo "" echo "Please deploy the EventListener resources first:" echo " cd deploy" - echo " make eventlistener ORCHESTRATOR_API_URL= NAMESPACE=$NAMESPACE" + echo " make eventlistener # Auto-detects namespace from current context" exit 1 fi