Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 28 additions & 12 deletions .devcontainer/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,23 @@ See [tests/ctst/README.md](../tests/ctst/README.md) for more details on building

## Running e2e tests in the codespace

To run the CTST tests in the codespace, head to `.github/script/end2end/` and run `run-e2e-ctst.sh`
script. Some variables need to be exported before running the tests.
### Node tests (mocha)

```bash
env_variables=$(yq eval '.env | to_entries | .[] | .key + "=" + .value' .github/workflows/end2end.yaml | sed 's/\${{[^}]*}}//g') && export $env_variables
export E2E_IMAGE_TAG=latest
export GCP_BACKEND_DESTINATION_LOCATION=
# Set up the test environment (endpoints, credentials, mongo, TLS)
source .github/scripts/end2end/setup-e2e-env.sh

cd .github/scripts/end2end/
bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "backbeat" "default"
# Run mocha directly (setup-e2e-env.sh already cd's to node_tests/)
yarn mocha --exit -t 10000 --recursive smoke_tests
yarn mocha --exit -t 10000 --recursive cloudserver/bucketGetV2
yarn mocha --exit -t 10000 --grep "should list objects" --recursive cloudserver/bucketGetV2
```

### CTST tests (cucumber)

```bash
cd tests/ctst
./run-ctst-locally.sh @yourTag
```

## Accessing s3 service
Expand Down Expand Up @@ -67,10 +74,19 @@ Now you can use aws cli to interact with the S3 service

## Troubleshooting

### Known Issues

Credentials for `storage_manager` stop working once the Codespace is older than 12 hours.
This is because `accounts.py` performs an assume-role with a maximum session duration of 12 hours.

### Inspecting Codespace creation logs

You can inspect the logs of the Codespace creation this way:
1. Press `Ctrl+Shift+P` (or `Cmd+Shift+P` on Mac)
2. Type "Codespaces: Export Logs" and select it
3. A zip file will be downloaded to your local machine
4. In the zip, look at the `creation.log` file
You can inspect the Codespace creation logs in two ways:
1. While Codespace creation is still running:
   Use `Ctrl+Shift+P` (or `Cmd+Shift+P` on Mac) -> "View Creation Log" to see the full logs

2. When the setup is finished, dump the logs:
a. Press `Ctrl+Shift+P` (or `Cmd+Shift+P` on Mac)
b. Type "Codespaces: Export Logs" and select it
c. A zip file will be downloaded to your local machine
d. In the zip, look at the `creation.log` file
14 changes: 12 additions & 2 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
"features": {
"ghcr.io/devcontainers/features/docker-in-docker": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/node:1": {},
"ghcr.io/devcontainers/features/node:1": { "version": "20" },
"ghcr.io/devcontainers/features/python:1": {},
"ghcr.io/devcontainers/features/sshd:1": {},
"ghcr.io/devcontainers-extra/features/kind:1": {},
Expand Down Expand Up @@ -96,7 +96,17 @@
"vscode": {
"extensions": [
"ms-kubernetes-tools.vscode-kubernetes-tools",
]
"cucumberopen.cucumber-official"
],
"settings": {
"cucumber.glue": [
"tests/ctst/steps/**/*.ts",
"tests/ctst/common/**/*.ts"
],
"cucumber.features": [
"tests/ctst/features/**/*.feature"
]
}
}
},
"containerEnv": {
Expand Down
40 changes: 15 additions & 25 deletions .devcontainer/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,19 @@

set -e

env_variables=$(yq eval '.env | to_entries | .[] | .key + "=" + .value' .github/workflows/end2end.yaml | sed 's/\${{[^}]*}}//g') && export $env_variables
export GIT_ACCESS_TOKEN=${GITHUB_TOKEN}
export E2E_IMAGE_TAG=latest

# Persist workflow env vars so they survive across terminal sessions
ZENKO_ENV_FILE="$HOME/.zenko.env"
yq eval '.env | to_entries | .[] | "export " + .key + "=" + (.value | tostring | @sh)' .github/workflows/end2end.yaml \
| sed 's/\${{[^}]*}}//g' > "$ZENKO_ENV_FILE"
echo 'export GIT_ACCESS_TOKEN="${GITHUB_TOKEN}"' >> "$ZENKO_ENV_FILE"
# Disable GCP tests as we don't have credentials setup in devcontainer
export GCP_BACKEND_DESTINATION_LOCATION=
echo 'export GCP_BACKEND_DESTINATION_LOCATION=' >> "$ZENKO_ENV_FILE"

# Source now for this session
source "$ZENKO_ENV_FILE"

# Auto-source in future Codespace terminals
echo '[ -f "$HOME/.zenko.env" ] && source "$HOME/.zenko.env"' >> "$HOME/.bashrc"

GITHUB_ENV=$(mktemp /tmp/github_env.XXXXXX)

Expand All @@ -22,10 +29,10 @@ for i in $(seq 0 $array_length); do
working_dir=$(yq ".runs.steps[$i].working-directory" .github/actions/deploy/action.yaml)
run_command=$(yq ".runs.steps[$i].run" .github/actions/deploy/action.yaml)

# We don't want to run `run-e2e-test.sh` because it is used for linting here, user will run it manually if needed after deployment
# We can't run `configure-e2e.sh` here because it needs an image that is not yet built and sent to kind, will be run after
# We can't run `configure-e2e.sh` here because it needs services to be ready first, will be run after
# User will run tests manually after deployment
(
if [[ "$run_command" != "null" && "$run_command" != *"configure-e2e.sh"* && "$run_command" != *"run-e2e-test.sh"* ]]; then
if [[ "$run_command" != "null" && "$run_command" != *"configure-e2e.sh"* ]]; then
# Inject env 'generated' from previous steps
source "$GITHUB_ENV"

Expand All @@ -47,23 +54,6 @@ for i in $(seq 0 $array_length); do
)
done

(
cd tests/zenko_tests

envsubst < 'e2e-config.yaml.template' > 'e2e-config.yaml'
if [[ "${ENABLE_RING_TESTS}" == "false" ]]; then
yq -i 'del(.locations[] | select(.locationType == "location-scality-ring-s3-v1"))' e2e-config.yaml
fi

if [ -z "$GCP_BACKEND_DESTINATION_LOCATION" ]; then
yq -i 'del(.locations[] | select(.locationType == "location-gcp-v1"))' e2e-config.yaml
fi

docker build -t $E2E_IMAGE_NAME:$E2E_IMAGE_TAG .
kind load docker-image ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG}
docker rmi ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG}
)

(
cd .github/scripts/end2end

Expand Down
6 changes: 1 addition & 5 deletions .github/actions/deploy/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -87,9 +87,5 @@ runs:
if: ${{ inputs.deploy_metadata == 'true' }}
- name: End-to-end configuration
shell: bash
run: bash configure-e2e.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "default"
working-directory: ./.github/scripts/end2end
- name: Linting
shell: bash
run: bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "lint" "default"
run: bash configure-e2e.sh "end2end" "default"
working-directory: ./.github/scripts/end2end
9 changes: 7 additions & 2 deletions .github/scripts/end2end/configure-e2e-ctst.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@ set -exu

DIR=$(dirname "$0")

# Set up ingress endpoints and /etc/hosts for out-of-cluster access
source "$DIR/configure-e2e-endpoints.sh"

# Get kafka image name and tag
kafka_image() {
source <( "$DIR"/../../../solution/kafka_build_vars.sh )
Expand Down Expand Up @@ -76,8 +79,10 @@ UUID=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernet
UUID=${UUID%.*}
UUID=${UUID:1}

echo "127.0.0.1 iam.zenko.local s3-local-file.zenko.local keycloak.zenko.local \
sts.zenko.local management.zenko.local s3.zenko.local website.mywebsite.com utilization.zenko.local" | sudo tee -a /etc/hosts
# Ensure additional hostnames are in /etc/hosts
if ! grep -q "s3-local-file.zenko.local" /etc/hosts 2>/dev/null; then
echo "127.0.0.1 s3-local-file.zenko.local website.mywebsite.com" | sudo tee -a /etc/hosts
fi

# Add bucket notification target
envsubst < ./configs/notification_destinations.yaml | kubectl apply -f -
Expand Down
137 changes: 137 additions & 0 deletions .github/scripts/end2end/configure-e2e-endpoints.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
#!/bin/bash
# configure-e2e-endpoints.sh
#
# Creates ingress resources for internal services that don't have one,
# and adds all *.zenko.local hostnames to /etc/hosts.
#
# This allows tests to run outside the cluster (directly on the CI runner)
# using the same ingress-based endpoints for all Zenko services.
#
# Idempotent: safe to source multiple times.
#
# Usage:
#   source configure-e2e-endpoints.sh   # sets ZENKO_* endpoint vars
#   bash configure-e2e-endpoints.sh     # just creates ingresses + /etc/hosts

# Enable strict mode only when executed directly; when sourced, the calling
# script keeps control of its own shell options.
case "${BASH_SOURCE[0]}" in
  "${0}") set -eu ;;
esac

# Default to the standard e2e deployment name and namespace unless the
# caller has already exported overrides.
: "${ZENKO_NAME:=end2end}"
: "${NAMESPACE:=default}"

# --- Create missing Ingress resources ---

#######################################
# Create an nginx Ingress routing a hostname to a service's "http" port,
# unless some ingress (in any namespace) already serves that host.
# Globals:   NAMESPACE (read)
# Arguments: $1 - ingress resource name
#            $2 - hostname to route (e.g. backbeat-api.zenko.local)
#            $3 - backend service name
# Outputs:   kubectl apply output, or a skip message, to stdout
#######################################
apply_ingress() {
  local name="$1"
  local host="$2"
  local service="$3"

  # Skip if an ingress already serves this host (e.g., from a prior Zenko
  # instance in PRA). -F matches the hostname literally — without it the
  # dots act as regex wildcards and a different host could false-positive
  # the check, silently skipping the ingress we actually need.
  if kubectl get ingress -A -o jsonpath='{.items[*].spec.rules[*].host}' | grep -qwF -- "${host}"; then
    echo "Ingress for ${host} already exists, skipping"
    return
  fi

  kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ${name}
  namespace: ${NAMESPACE}
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
  ingressClassName: nginx
  rules:
    - host: ${host}
      http:
        paths:
          - backend:
              service:
                name: ${service}
                port:
                  name: http
            path: /
            pathType: Prefix
EOF
}

# Ingresses for Zenko services that ship without one. Each entry is
# "ingress-name|hostname|backend-service":
#   backbeat-api — used by node tests (CRR) and CTST
#   vault-auth   — used by CTST
for ingress_spec in \
  "${ZENKO_NAME}-backbeat-api-ingress|backbeat-api.zenko.local|${ZENKO_NAME}-management-backbeat-api" \
  "${ZENKO_NAME}-vault-auth-api-ingress|vault-auth.zenko.local|${ZENKO_NAME}-connector-vault-auth-api"
do
  IFS='|' read -r ingress_name ingress_host ingress_service <<<"${ingress_spec}"
  apply_ingress "${ingress_name}" "${ingress_host}" "${ingress_service}"
done

# S3C (Ring) — only when metadata namespace exists (ENABLE_RING_TESTS=true)
# Unlike the Zenko services above, S3C lives in the "metadata" namespace and
# exposes cloudserver on a numbered port (8000) rather than a named "http"
# port, so it gets a dedicated manifest instead of going through
# apply_ingress(). No duplicate guard is needed: `kubectl apply` is
# idempotent for this fixed spec.
if kubectl get namespace metadata &>/dev/null; then
  kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: s3c-ingress
  namespace: metadata
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
  ingressClassName: nginx
  rules:
    - host: s3c.local
      http:
        paths:
          - backend:
              service:
                name: s3c-cloudserver
                port:
                  number: 8000
            path: /
            pathType: Prefix
EOF
fi

# --- Wait for ingress controller to pick them up ---
#
# Best-effort only: a missing loadBalancer status never fails setup
# (`|| true`); it just means the controller is slow.
#
# Each ingress is guarded and waited on individually: apply_ingress may have
# skipped one of them (host already served by another instance), and a single
# `kubectl wait` listing several resources aborts entirely when any one is
# absent — which would also lose the wait on the ingresses that do exist.
for ingress in \
  "${ZENKO_NAME}-backbeat-api-ingress" \
  "${ZENKO_NAME}-vault-auth-api-ingress"
do
  if kubectl get ingress "${ingress}" &>/dev/null; then
    kubectl wait --for=jsonpath='{.status.loadBalancer.ingress}' \
      "ingress/${ingress}" \
      --timeout=60s 2>/dev/null || true
  fi
done

if kubectl get ingress s3c-ingress -n metadata &>/dev/null; then
  kubectl wait --for=jsonpath='{.status.loadBalancer.ingress}' \
    ingress/s3c-ingress -n metadata \
    --timeout=60s 2>/dev/null || true
fi

# --- /etc/hosts setup ---
# Point every ingress hostname at 127.0.0.1 so out-of-cluster clients reach
# the locally forwarded ingress controller ports.

ZENKO_HOSTS="s3.zenko.local iam.zenko.local sts.zenko.local management.zenko.local keycloak.zenko.local utilization.zenko.local backbeat-api.zenko.local vault-auth.zenko.local aws-mock.zenko.local azure-mock.zenko.local devstoreaccount1.blob.azure-mock.zenko.local devstoreaccount1.queue.azure-mock.zenko.local s3c.local"

# backbeat-api.zenko.local is the sentinel for "this script already ran".
# -F matches it literally — otherwise the dots are regex wildcards and an
# unrelated /etc/hosts line could suppress the append.
if ! grep -qF "backbeat-api.zenko.local" /etc/hosts 2>/dev/null; then
  echo "127.0.0.1 ${ZENKO_HOSTS}" | sudo tee -a /etc/hosts
fi

# --- Export endpoint variables ---
# Ingress-based hostnames, reachable from outside the cluster. Test suites
# read these instead of in-cluster service DNS names.

CLOUDSERVER_HOST="s3.zenko.local"
CLOUDSERVER_ENDPOINT="http://s3.zenko.local"
BACKBEAT_API_ENDPOINT="http://backbeat-api.zenko.local"
VAULT_ENDPOINT="http://iam.zenko.local"
VAULT_STS_ENDPOINT="http://sts.zenko.local"
VAULT_AUTH_HOST="vault-auth.zenko.local"
export CLOUDSERVER_HOST CLOUDSERVER_ENDPOINT BACKBEAT_API_ENDPOINT \
       VAULT_ENDPOINT VAULT_STS_ENDPOINT VAULT_AUTH_HOST

# Human-readable summary for the CI log.
printf '%s\n' \
  "=== Endpoints configured for out-of-cluster access ===" \
  "  S3: ${CLOUDSERVER_ENDPOINT}" \
  "  Backbeat API: ${BACKBEAT_API_ENDPOINT}" \
  "  Vault IAM: ${VAULT_ENDPOINT}" \
  "  Vault STS: ${VAULT_STS_ENDPOINT}" \
  "  Vault Auth: http://${VAULT_AUTH_HOST}"
Loading
Loading