diff --git a/.gitea/workflows/build-containers-on-demand.yml b/.gitea/workflows/build-containers-on-demand.yml new file mode 100644 index 0000000..a3b897c --- /dev/null +++ b/.gitea/workflows/build-containers-on-demand.yml @@ -0,0 +1,287 @@ +name: Build containers when image tags change + +on: + push: + # Uncomment/adjust once working: + # branches: [ main ] + paths: + - "argocd/deployment.yaml" + - "Dockerfile" + - "data-loader/**" + - ".gitea/workflows/build-containers-on-demand.yml" + +jobs: + build-if-image-changed: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - container_name: web + expected_repo: git.baumann.gr/adebaumann/labhelper + build_context: . + container_type: containers + description: main container + - container_name: loader + expected_repo: git.baumann.gr/adebaumann/labhelper-data-loader + build_context: data-loader + container_type: initContainers + description: init-container + + env: + DEPLOY_FILE: "argocd/deployment.yaml" + CONTAINER_NAME: ${{ matrix.container_name }} + EXPECTED_REPO: ${{ matrix.expected_repo }} + CONTAINER_TYPE: ${{ matrix.container_type }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Determine base commit + id: base + shell: bash + run: | + set -euo pipefail + if git rev-parse --verify -q HEAD~1 >/dev/null; then + echo "base=$(git rev-parse HEAD~1)" >> "$GITHUB_OUTPUT" + else + echo "base=$(git hash-object -t tree /dev/null)" >> "$GITHUB_OUTPUT" + fi + + - name: Install yq + shell: bash + run: | + set -euo pipefail + YQ_VER=v4.44.3 + curl -fsSL "https://github.com/mikefarah/yq/releases/download/${YQ_VER}/yq_linux_amd64" -o /usr/local/bin/yq + chmod +x /usr/local/bin/yq + yq --version + + - name: Read ${{ matrix.description }} image from deployment + id: img + shell: bash + run: | + set -euo pipefail + file="${DEPLOY_FILE:-argocd/deployment.yaml}" + cname="${CONTAINER_NAME}" + expected_repo="${EXPECTED_REPO}" + ctype="${CONTAINER_TYPE}" + + 
export cname + export expected_repo + export ctype + + echo "========================================" + echo "Checking: $ctype / $cname" + echo "Expected repo: $expected_repo" + echo "========================================" + + # --- functions ------------------------------------------------------ + have_yq() { command -v yq >/dev/null 2>&1; } + + # yq-based extractor (multi-doc aware; Deployment only; container name match) + yq_extract() { + if [[ "$ctype" == "initContainers" ]]; then + yq -r ' + select(.kind == "Deployment") | + .spec.template.spec.initContainers // [] | + map(select(.name == env(cname))) | + .[]?.image + ' "$1" 2>/dev/null | tail -n 1 + else + yq -r ' + select(.kind == "Deployment") | + .spec.template.spec.containers // [] | + map(select(.name == env(cname))) | + .[]?.image + ' "$1" 2>/dev/null | tail -n 1 + fi + } + + # ultra-tolerant fallback: grep around the appropriate block + fallback_extract() { + local block_pattern + local end_pattern + + if [[ "$ctype" == "initContainers" ]]; then + block_pattern="^[[:space:]]*initContainers:" + end_pattern="^[[:space:]]*containers:|^[[:alpha:]][[:alnum:]_:-]*:" + else + block_pattern="^[[:space:]]*containers:" + end_pattern="^[[:alpha:]][[:alnum:]_:-]*:|^[[:space:]]*initContainers:" + fi + + awk -v cname="$cname" -v block="$block_pattern" -v endp="$end_pattern" ' + BEGIN{ in_cont=0; name=""; image="" } + $0 ~ block {in_cont=1; next} + in_cont { + # end of block when we hit the end pattern + if ($0 ~ endp) { in_cont=0 } + # capture name and image lines + if ($0 ~ /^[[:space:]]*-?[[:space:]]*name:[[:space:]]*/) { + name=$0; sub(/^.*name:[[:space:]]*/,"",name); gsub(/^[ "\047]+|[ "\047]+$/,"",name) + } + if ($0 ~ /^[[:space:]]*image:[[:space:]]*/) { + image=$0; sub(/^.*image:[[:space:]]*/,"",image); gsub(/^[ "\047]+|[ "\047]+$/,"",image) + if (name==cname) { print image; exit } + } + } + ' "$1" + } + + list_workload_images() { + echo "== workload $ctype in $1 ==" >&2 + if have_yq; then + if [[ "$ctype" 
== "initContainers" ]]; then + yq -r ' + select(.kind == "Deployment") | + .spec.template.spec.initContainers // [] | + .[] | "\(.name): \(.image)" + ' "$1" 2>/dev/null | nl -ba >&2 || true + else + yq -r ' + select(.kind == "Deployment") | + .spec.template.spec.containers // [] | + .[] | "\(.name): \(.image)" + ' "$1" 2>/dev/null | nl -ba >&2 || true + fi + else + # coarse list for visibility + if [[ "$ctype" == "initContainers" ]]; then + awk ' + /^ *initContainers:/, /^ *containers:/ { if ($0 ~ /name:|image:/) print } + ' "$1" | nl -ba >&2 || true + else + awk ' + /^ *containers:/, /^[^ ]/ { if ($0 ~ /name:|image:/) print } + ' "$1" | nl -ba >&2 || true + fi + fi + } + # -------------------------------------------------------------------- + + # Ensure yq is present; if install failed earlier, try once more + if ! have_yq; then + echo "yq missing; attempting quick install..." >&2 + curl -fsSL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/local/bin/yq || true + chmod +x /usr/local/bin/yq || true + fi + + # Prepare old file (previous commit) if exists + if git cat-file -e "${{ steps.base.outputs.base }}":"$file" 2>/dev/null; then + git show "${{ steps.base.outputs.base }}:$file" > /tmp/old.yaml + else + : > /tmp/old.yaml + fi + + list_workload_images /tmp/old.yaml || true + list_workload_images "$file" || true + + if have_yq; then + old_image="$(yq_extract /tmp/old.yaml || true)" + new_image="$(yq_extract "$file" || true)" + else + old_image="$(fallback_extract /tmp/old.yaml || true)" + new_image="$(fallback_extract "$file" || true)" + fi + + # If yq path failed to find it, try fallback once more as safety + if [ -z "${new_image:-}" ]; then + new_image="$(fallback_extract "$file" || true)" + fi + if [ -z "${old_image:-}" ]; then + old_image="$(fallback_extract /tmp/old.yaml || true)" + fi + + echo "Old $ctype image: ${old_image:-}" + echo "New $ctype image: ${new_image:-}" + + if [ -z "${new_image:-}" ]; then + echo "ERROR: Could 
not find $ctype[].name == \"$cname\" image in $file" + exit 1 + fi + + # Split repo and tag + new_repo="${new_image%:*}" + new_tag="${new_image##*:}" + if [[ "$new_repo" != "$expected_repo" ]]; then + echo "ERROR: Found $ctype \"$cname\" image repo is \"$new_repo\" but expected \"$expected_repo\"" + exit 1 + fi + + registry="$(echo "$new_repo" | awk -F/ '{print $1}')" + + { + echo "new_image=$new_image" + echo "new_repo=$new_repo" + echo "new_tag=$new_tag" + echo "registry=$registry" + } >> "$GITHUB_OUTPUT" + + - name: Check if image exists on registry + id: check_image + shell: bash + run: | + set -euo pipefail + new_repo="${{ steps.img.outputs.new_repo }}" + new_tag="${{ steps.img.outputs.new_tag }}" + registry_user="${{ secrets.REGISTRY_USER }}" + registry_password="${{ secrets.REGISTRY_PASSWORD }}" + + # Extract registry host and image name + registry_host=$(echo "$new_repo" | cut -d/ -f1) + image_path=$(echo "$new_repo" | cut -d/ -f2-) + + echo "Checking if $new_repo:$new_tag exists on registry $registry_host" + + # Use Docker Registry API v2 to check manifest + # Format: https://registry/v2/{image_path}/manifests/{tag} + manifest_url="https://${registry_host}/v2/${image_path}/manifests/${new_tag}" + + # Check with authentication + http_code=$(curl -s -o /dev/null -w "%{http_code}" \ + -u "${registry_user}:${registry_password}" \ + -H "Accept: application/vnd.docker.distribution.manifest.v2+json,application/vnd.docker.distribution.manifest.list.v2+json" \ + "$manifest_url" || echo "000") + + if [ "$http_code" = "200" ]; then + echo "Image already exists on registry (HTTP $http_code)" + echo "exists=true" >> "$GITHUB_OUTPUT" + else + echo "Image does not exist on registry (HTTP $http_code)" + echo "exists=false" >> "$GITHUB_OUTPUT" + fi + + - name: Skip if image already exists + if: steps.check_image.outputs.exists == 'true' + run: echo "${{ matrix.description }} image ${{ steps.img.outputs.new_image }} already exists on registry; skipping build." 
+ + - name: Set up Buildx + if: steps.check_image.outputs.exists == 'false' + uses: docker/setup-buildx-action@v3 + + - name: Log in to registry + if: steps.check_image.outputs.exists == 'false' + uses: docker/login-action@v3 + with: + registry: ${{ steps.img.outputs.registry }} + username: ${{ secrets.REGISTRY_USER }} + password: ${{ secrets.REGISTRY_PASSWORD }} + + - name: Build and push ${{ matrix.description }} (exact tag from deployment) + if: steps.check_image.outputs.exists == 'false' + uses: docker/build-push-action@v6 + with: + context: ${{ matrix.build_context }} + push: true + tags: | + ${{ steps.img.outputs.new_image }} + ${{ steps.img.outputs.new_repo }}:latest + labels: | + org.opencontainers.image.source=${{ gitea.repository }} + org.opencontainers.image.revision=${{ gitea.sha }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.gitea/workflows/check_code_in_sonarqube.yaml b/.gitea/workflows/check_code_in_sonarqube.yaml new file mode 100644 index 0000000..9a2c995 --- /dev/null +++ b/.gitea/workflows/check_code_in_sonarqube.yaml @@ -0,0 +1,67 @@ +on: + push: + # branches: + # - main + # - development + pull_request: + types: [opened, synchronize, reopened] + +name: SonarQube Scan +jobs: + sonarqube: + name: SonarQube Trigger + runs-on: ubuntu-latest + steps: + - name: Checking out + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + pip install -r requirements.txt + + - name: Run tests with coverage + run: | + coverage run --source='.' 
manage.py test + coverage xml + + - name: Set up JDK 17 + uses: actions/setup-java@v3 + with: + java-version: '17' + distribution: 'temurin' + + - name: Cache SonarQube packages + uses: actions/cache@v3 + with: + path: ~/.sonar/cache + key: ${{ runner.os }}-sonar + restore-keys: ${{ runner.os }}-sonar + + - name: Download and setup SonarScanner + run: | + mkdir -p $HOME/.sonar + wget -q https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-5.0.1.3006-linux.zip + unzip -q sonar-scanner-cli-5.0.1.3006-linux.zip -d $HOME/.sonar/ + echo "$HOME/.sonar/sonar-scanner-5.0.1.3006-linux/bin" >> $GITHUB_PATH + + - name: Verify Java version + run: java -version + + - name: SonarQube Scan + env: + SONAR_HOST_URL: ${{ secrets.SONARQUBE_HOST }} + SONAR_TOKEN: ${{ secrets.SONARQUBE_TOKEN }} + run: | + sonar-scanner \ + -Dsonar.projectKey=${{ github.event.repository.name }} \ + -Dsonar.sources=. \ + -Dsonar.host.url=${SONAR_HOST_URL} \ + -Dsonar.token=${SONAR_TOKEN} \ + -Dsonar.python.coverage.reportPaths=coverage.xml diff --git a/argocd/001_pvc.yaml b/argocd/001_pvc.yaml new file mode 100644 index 0000000..0d60493 --- /dev/null +++ b/argocd/001_pvc.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: labhelper-data-pvc + namespace: labhelper +spec: + accessModes: + - ReadWriteMany + storageClassName: nfs + resources: + requests: + storage: 2Gi + diff --git a/argocd/deployment.yaml b/argocd/deployment.yaml new file mode 100644 index 0000000..57263ba --- /dev/null +++ b/argocd/deployment.yaml @@ -0,0 +1,70 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: django + namespace: labhelper +spec: + replicas: 1 + selector: + matchLabels: + app: django + template: + metadata: + labels: + app: django + spec: + securityContext: + fsGroup: 999 + fsGroupChangePolicy: "OnRootMismatch" + initContainers: + - name: loader + image: git.baumann.gr/adebaumann/labhelper-data-loader:0.001 + command: [ "sh","-c","cp -n 
preload/preload.sqlite3 /data/db.sqlite3; chown -R 999:999 /data; ls -la /data; sleep 10; exit 0" ] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: web + image: git.baumann.gr/adebaumann/labhelper:0.001 + imagePullPolicy: Always + ports: + - containerPort: 8000 + volumeMounts: + - name: data + mountPath: /app/data + readinessProbe: + httpGet: + path: / + port: 8000 + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 6 + livenessProbe: + httpGet: + path: / + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 20 + timeoutSeconds: 2 + failureThreshold: 3 + volumes: + - name: data + persistentVolumeClaim: + claimName: labhelper-data-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: django + namespace: labhelper +spec: + type: ClusterIP + selector: + app: django + ports: + - name: http + protocol: TCP + port: 8000 + targetPort: 8000 + diff --git a/argocd/ingress.yaml b/argocd/ingress.yaml new file mode 100644 index 0000000..833c906 --- /dev/null +++ b/argocd/ingress.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: django + namespace: labhelper + annotations: + argocd.argoproj.io/ignore-healthcheck: "true" +spec: + ingressClassName: traefik + rules: + - host: labhelper.adebaumann.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: django + port: + number: 8000 diff --git a/argocd/nfs-pv.yaml b/argocd/nfs-pv.yaml new file mode 100644 index 0000000..831cd96 --- /dev/null +++ b/argocd/nfs-pv.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: labhelper-data-pv + # note: PersistentVolumes are cluster-scoped; namespace must not be set +spec: + capacity: + storage: 2Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs + nfs: + server: 192.168.17.199 + path: /mnt/user/labhelper \ No newline at end of file diff --git a/argocd/nfs-storageclass.yaml b/argocd/nfs-storageclass.yaml new file mode 100644 index 
0000000..1fb6d8a --- /dev/null +++ b/argocd/nfs-storageclass.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs +provisioner: kubernetes.io/no-provisioner +allowVolumeExpansion: true +reclaimPolicy: Retain +volumeBindingMode: Immediate \ No newline at end of file diff --git a/data-loader/Dockerfile b/data-loader/Dockerfile new file mode 100644 index 0000000..0f22e8d --- /dev/null +++ b/data-loader/Dockerfile @@ -0,0 +1,11 @@ +FROM alpine:3.22.1 +RUN addgroup -S appuser && \ + adduser -S appuser -G appuser && \ + mkdir /preload && \ + chown -R appuser:appuser /preload +COPY preload.sqlite3 /preload/ +RUN chown appuser:appuser /preload/preload.sqlite3 +RUN mkdir /data +RUN chown appuser:appuser /data +USER root + diff --git a/data-loader/preload.sqlite3 b/data-loader/preload.sqlite3 new file mode 100644 index 0000000..7b7b92c Binary files /dev/null and b/data-loader/preload.sqlite3 differ