merge step 2

This commit is contained in:
Richard Robert Reitz 2024-12-17 20:06:16 +01:00
parent 163543dbe1
commit f3dfdf66ea
23 changed files with 2255 additions and 30 deletions

View file

@ -15,6 +15,14 @@ controller:
proxy-buffer-size: 32k
use-forwarded-headers: "true"
#monitoring nginx
metrics:
enabled: true
serviceMonitor:
additionalLabels:
release: "ingress-nginx"
enabled: true
{{{ if eq .Env.CLUSTER_TYPE "kind" }}}
hostPort:
enabled: true

View file

@ -14,4 +14,10 @@ persistence:
buckets:
- name: edfbuilder-backups
consoleIngress:
enabled: true
ingressClassName: nginx
hosts:
- minio-backup.{{{ .Env.DOMAIN }}}
existingSecret: root-creds

View file

@ -14,7 +14,7 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true # TODO: RIRE What does this mean: do not copy metdata, since (because of its large size) it can lead to sync failure
- ServerSideApply=true # do not copy metadata, since (because of its large size) it can lead to sync failure
destination:
name: in-cluster
namespace: monitoring

View file

@ -15,7 +15,7 @@ grafana:
syncPolicy:
syncOptions:
- ServerSideApply=true
- ServerSideApply=true
sidecar:
dashboards:
@ -32,4 +32,14 @@ grafana:
server:
domain: {{{ .Env.DOMAIN }}}
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
serve_from_sub_path: true
serviceMonitor:
# If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator
enabled: true
#monitoring nginx
prometheus:
prometheusSpec:
podMonitorSelectorNilUsesHelmValues: false
serviceMonitorSelectorNilUsesHelmValues: false

View file

@ -1,15 +0,0 @@
grafana:
namespaceOverride: "monitoring"
grafana.ini:
server:
domain: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
ingress:
enabled: true
ingressClassName: nginx
hosts:
- {{{ .Env.DOMAIN }}}
path: /grafana

View file

@ -3,11 +3,5 @@ loki:
replication_factor: 1
auth_enabled: false
#experimental
storageConfig:
# boltdb_shipper:
# shared_store: s3
# aws:
# s3: s3://${cluster_region}
# bucketnames: ${bucket_name}
filesystem: null
# storageConfig:
# filesystem: null

View file

@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argo-workflows-ingress
  namespace: argo
  annotations:
    nginx.ingress.kubernetes.io/use-regex: "true"
    # strip the /argo-workflows prefix before forwarding to argo-server
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: "nginx"
  rules:
    - host: localhost
      http:
        paths:
          - path: /argo-workflows(/|$)(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: argo-server
                port:
                  name: web
    - host: {{{ .Env.DOMAIN }}}
      http:
        paths:
          - path: /argo-workflows(/|$)(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: argo-server
                port:
                  name: web

View file

@ -9,6 +9,7 @@ spec:
- ./argo-workflows/template.yaml
- ./app-with-bucket/template.yaml
- ./demo-go-hello-world/template.yaml
- ./spring-petclinic/template.yaml
---
apiVersion: backstage.io/v1alpha1
kind: Location

View file

@ -0,0 +1,94 @@
## Spring PetClinic Template
This template can deploy a fork of Spring's PetClinic and showcase the entire
development workflow from version control over build and deployment into
kubernetes to monitoring the state at runtime.
The goal is to demonstrate the migration of an existing GitHub project into the
stack by applying only minimal changes for compatibility. These changes
include:
- Overwriting the CI workflows due to compatibility with forgejo actions and
container issues, thus making builds containerless.
- Extending the kubernetes manifests to use Ingress and Service objects
- Integrate Prometheus monitoring endpoint
### Backstage - Forking
The Backstage template allows you to define a URL to 'fork' from. However,
the template does not actually create a git fork but downloads the latest
`HEAD` from a given branch. The source repository has to be known by backstage
so it can apply the correct download mechanism, see `backend.integrations` in
the backstage config.
In the first step, the original source code is downloaded from the given
upstream repository. Subsequently, overrides provided by the template are
copied into the codebase replacing some workflow and deployment files.
This 'merged' codebase is pushed into a new git repository in the supplied
forgejo instance. Additionally, an ArgoCD deployment is created based on this
new given repo.
### Forgejo - CI with Forgejo Actions
As soon as the patched codebase is pushed into the forgejo git repository,
forgejo actions are triggered and start executing the three existing workflows.
However, only the two build workflows are patched to work within the current
stack, the third deployment workflow fails as it cannot start a kind cluster.
In the current configuration, workflows are by default executed in a minimal
node-debian container. This suffices in most cases to run GitHub actions.
Like in the original upstream repository on GitHub, a Gradle- and a Maven-based
workflow are started. The Gradle one only executes a simple java build while
the Maven version also includes the creation of a container image and a
trivy-based security scan.
Both workflows are patched as referencing of actions differs in forgejo actions
from GitHub actions. In GitHub Actions, actions are referenced as paths to
github repositories. In Forgejo Actions, this mechanism similarly refers to
actions hosted on `code.forgejo.org` even on self-hosted instances. As only a
small subset of actions is ported to `code.forgejo.org` due to licensing and
compatibility (not all GitHub actions can work with forgejo actions), forgejo
actions also allow referencing actions by URL. Thus, the action
`https://github.com/actions/setup-java` instructs the forgejo runner to
download the action from GitHub. (The default actions repository can be
overwritten)
Creating the application container within the Maven workflow is accomplished
without using 'native' container tooling, i.e. docker or podman. Besides this
being favorable as it introduces less side effects, the current stack
implementation does not support running nested containers, yet.
Furthermore, as the system uses self-signed certificates, certificate checks
are disabled throughout the system for now.
After a successful build, the container image is published into the Forgejo
container registry and can be pulled for deployment.
### ArgoCD - Deployment
The PetClinic consists of two components, a persistent PostgreSQL database and
the java application that is compiled from source. On execution of the
Backstage template an application deployment is created in ArgoCD. The
resources defined in the newly created git repository are synchronized into the
kubernetes cluster. However, as the java app container has to be built by
the CI workflows, the deployment will initially fail but become successful as
soon as the container image is available.
### Prometheus & Grafana - Monitoring
Prometheus and Grafana among others are deployed as part of the IDP monitoring
stack. To integrate with these existing components the Backstage template adds
a ServiceMonitor definition to the deployment of the PetClinic. It instructs
Prometheus to scrape the `actuator/prometheus` endpoint in specific intervals.
The data contains jvm health data and can be visualized in Grafana.
As the upstream PetClinic on GitHub does not contain the necessary dependencies
to enable the Prometheus endpoint, the app is by default bootstrapped from
a fork that contains the `micrometer-registry-prometheus` dependency.

View file

@ -0,0 +1,32 @@
# This workflow will build a Java project with Gradle, and cache/restore any dependencies to improve the workflow execution time
# For more information see: https://docs.github.com/en/actions/use-cases-and-examples/building-and-testing/building-and-testing-java-with-gradle
name: Java CI with Gradle

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        java: [ '17' ]
    steps:
      - uses: actions/checkout@v4
      - name: Set up JDK {% raw %}${{matrix.java}}{% endraw %}
        # full URL reference: forgejo runners resolve bare action names against
        # code.forgejo.org, so GitHub-hosted actions must be referenced by URL
        uses: https://github.com/actions/setup-java@v4
        with:
          java-version: '{% raw %}${{matrix.java}}{% endraw %}'
          distribution: 'adopt'
          cache: maven  # NOTE(review): looks copied from the Maven workflow — 'gradle' seems intended; confirm
      - name: Setup Gradle
        uses: https://github.com/gradle/actions/setup-gradle@v4
      - name: Build with Gradle
        run: ./gradlew build

View file

@ -0,0 +1,63 @@
# This workflow will build a Java project with Maven, and cache/restore any dependencies to improve the workflow execution time
# For more information see: https://docs.github.com/en/actions/use-cases-and-examples/building-and-testing/building-and-testing-java-with-maven
name: Java CI with Maven
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
build:
runs-on: ubuntu-latest
#container:
# image: ghcr.io/catthehacker/ubuntu:act-latest # the large image
strategy:
matrix:
java: [ '17' ]
steps:
- uses: actions/checkout@v4
- name: Set up JDK {% raw %}${{matrix.java}}{% endraw %}
uses: https://github.com/actions/setup-java@v4
with:
java-version: '{% raw %}${{matrix.java}}{% endraw %}'
distribution: 'adopt'
cache: maven
- name: Build with Maven Wrapper
run: ./mvnw -B verify
- name: Build image
#run: ./mvnw spring-boot:build-image # the original image build
run: |
export CONTAINER_REPO=$(echo {% raw %}${{ env.GITHUB_REPOSITORY }}{% endraw %} | tr '[:upper:]' '[:lower:]')
./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:build -Djib.allowInsecureRegistries=true -Dimage=gitea.{{{ .Env.DOMAIN }}}/${CONTAINER_REPO}:latest -Djib.to.auth.username={% raw %}${{ github.actor }}{% endraw %} -Djib.to.auth.password={% raw %}${{ secrets.PACKAGES_TOKEN }}{% endraw %}
- name: Build image as tar
run: |
./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:buildTar -Djib.allowInsecureRegistries=true
# separating the trivy scan into another job is not necessary. It, however, demonstrates forgejo's compatibility with GitHub
- uses: forgejo/upload-artifact@v4
with:
name: petclinic-image
path: target/jib-image.tar
env:
NODE_TLS_REJECT_UNAUTHORIZED: 0 # This is necessary due to self signed certs for forgejo, proper setups can skip this
securityscan:
runs-on: ubuntu-latest
#container:
# image: aquasec/trivy # the container does not contain node...
steps:
- uses: forgejo/download-artifact@v4
with:
name: petclinic-image
env:
NODE_TLS_REJECT_UNAUTHORIZED: 0 # This is necessary due to self signed certs for forgejo, proper setups can skip this
- name: install trivy from deb package
run: |
wget -O trivy.deb https://github.com/aquasecurity/trivy/releases/download/v0.58.0/trivy_0.58.0_Linux-64bit.deb
DEBIAN_FRONTEND=noninteractive dpkg -i trivy.deb
- name: scan the image
run: trivy image --input jib-image.tar

View file

@ -0,0 +1,36 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: ${{ values.name }}
  description: This is a Backstage component created from the custom template that forks the PetClinic
  annotations:
    backstage.io/techdocs-ref: dir:.
    backstage.io/kubernetes-label-selector: 'entity-id=${{ values.name }}'
    backstage.io/kubernetes-namespace: gitea
  links:
    - url: https://gitea.{{{ .Env.DOMAIN }}}:443
      title: Repo URL
      icon: git
spec:
  owner: guests
  lifecycle: experimental
  type: service
  system: ${{ values.name | dump }}
---
apiVersion: backstage.io/v1alpha1
kind: System
metadata:
  name: ${{ values.name | dump }}
  description: A system for managing services created from the Gitea template.
  annotations:
    backstage.io/techdocs-ref: dir:.
  links:
    - url: https://gitea.{{{ .Env.DOMAIN }}}:443
      title: Gitea Repo
      icon: git
spec:
  owner: guests
  lifecycle: experimental
  type: service

View file

@ -0,0 +1,76 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: demo-db
  namespace: ${{ values.namespace }}
type: servicebinding.io/postgresql
stringData:
  type: "postgresql"
  provider: "postgresql"
  host: "demo-db"
  port: "5432"
  database: "petclinic"
  username: "user"
  password: "pass"
---
apiVersion: v1
kind: Service
metadata:
  name: demo-db
  namespace: ${{ values.namespace }}
spec:
  ports:
    - port: 5432
  selector:
    app: demo-db
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-db
  namespace: ${{ values.namespace }}
  labels:
    app: demo-db
spec:
  selector:
    matchLabels:
      app: demo-db
  template:
    metadata:
      labels:
        app: demo-db
    spec:
      containers:
        - image: postgres:17
          name: postgresql
          env:
            # credentials come from the demo-db Secret defined above
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: demo-db
                  key: username
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: demo-db
                  key: password
            - name: POSTGRES_DB
              valueFrom:
                secretKeyRef:
                  name: demo-db
                  key: database
          ports:
            - containerPort: 5432
              name: postgresql
          livenessProbe:
            tcpSocket:
              port: postgresql
          readinessProbe:
            tcpSocket:
              port: postgresql
          startupProbe:
            tcpSocket:
              port: postgresql

View file

@ -0,0 +1,125 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: ${{ values.namespace }}
---
apiVersion: v1
kind: Service
metadata:
  name: petclinic
  namespace: ${{ values.namespace }}
  labels:
    app: petclinic
spec:
  type: ClusterIP
  ports:
    - port: 8080
      targetPort: http
      name: http
  selector:
    app: petclinic
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ${{ values.namespace }}-petclinic
  namespace: ${{ values.namespace }}
spec:
  ingressClassName: nginx
  rules:
    - host: ${{ values.namespace }}.{{{ .Env.DOMAIN }}}
      http:
        paths:
          - backend:
              service:
                name: petclinic
                port:
                  name: http
            path: /
            pathType: Prefix
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: petclinic
  namespace: ${{ values.namespace }}
  labels:
    app: petclinic
spec:
  replicas: 1
  selector:
    matchLabels:
      app: petclinic
  template:
    metadata:
      labels:
        app: petclinic
    spec:
      containers:
        - name: workload
          image: gitea.{{{ .Env.DOMAIN }}}/giteaadmin/${{ values.name }}
          env:
            - name: SPRING_PROFILES_ACTIVE
              value: postgres
            - name: POSTGRES_URL
              value: jdbc:postgresql://demo-db/petclinic
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: demo-db
                  key: username
            - name: POSTGRES_PASS
              valueFrom:
                secretKeyRef:
                  name: demo-db
                  key: password
            - name: SERVICE_BINDING_ROOT
              value: /bindings
            - name: SPRING_APPLICATION_JSON
              value: |
                {
                  "management.endpoint.health.probes.add-additional-paths": true
                }
          ports:
            - name: http
              containerPort: 8080
          livenessProbe:
            httpGet:
              path: /livez
              port: http
          readinessProbe:
            httpGet:
              path: /readyz
              port: http
          volumeMounts:
            - mountPath: /bindings/secret
              name: binding
              readOnly: true
      volumes:
        - name: binding
          projected:
            sources:
              - secret:
                  name: demo-db
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: ${{ values.namespace }}-petclinic-monitor
  namespace: monitoring # Namespace where Prometheus is running
  labels:
    release: kube-prometheus-stack # this must match Prometheus' service monitor matching
spec:
  selector:
    matchLabels:
      app: petclinic # Match your application's labels
  namespaceSelector:
    matchNames:
      - ${{ values.namespace }} # Namespace where your app is running
  endpoints:
    - port: http # Name of the port in your Service
      path: /actuator/prometheus # Path to your metrics endpoint
      interval: 15s # How often to scrape metrics

View file

@ -0,0 +1,84 @@
apiVersion: scaffolder.backstage.io/v1beta3
kind: Template
metadata:
  name: spring-petclinic
  title: Spring PetClinic template
  description: An example template for the scaffolder that creates a 'fork' of Spring's PetClinic
spec:
  owner: user:guest
  type: service
  parameters:
    - title: Fill in some steps
      required:
        - name
      properties:
        name:
          title: Project Name
          type: string
          description: Unique name of the fork app
          ui:autofocus: true
        upstreamurl:
          title: Repo to Fork
          type: string
          description: The URL of the repo to fork
          default: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/Patrick.Sy/ipcei-petclinic/src/branch/main
          ui:emptyValue: 'https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/Patrick.Sy/ipcei-petclinic/src/branch/main'
  steps:
    - id: fetch-code # get the latest upstream code
      name: Fetch Code
      action: fetch:plain
      input:
        # url: https://github.com/spring-projects/spring-petclinic/tree/main
        url: ${{ parameters.upstreamurl }}
    - id: fetch-overrides # Apply specific overrides to add features and make modifications for compatibility
      name: Fetch Overrides
      action: fetch:template
      input:
        # url: ./skeleton/.github/workflows
        # targetPath: ./.github/workflows
        url: ./skeleton/
        targetPath: ./
        replace: true
        values:
          name: ${{ parameters.name }}
          namespace: ${{ parameters.name }}
    - id: publish
      name: Publish to Gitea
      action: publish:gitea
      input:
        repoUrl: gitea.{{{ .Env.DOMAIN }}}:443/?repo=${{parameters.name}}
        description: This is the repository for ${{ parameters.name }}
        sourcePath: ./
        defaultBranch: main
    - id: create-argocd-app
      name: Create ArgoCD App
      action: cnoe:create-argocd-app
      input:
        appName: ${{parameters.name}}
        appNamespace: ${{parameters.name}}
        argoInstance: in-cluster
        projectName: default
        # necessary until we generate our own cert
        repoUrl: https://gitea.{{{ .Env.DOMAIN }}}:443/giteaAdmin/${{parameters.name}}
        path: "k8s"
    - id: register
      name: Register in Catalog
      action: catalog:register
      input:
        repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }}
        catalogInfoPath: 'catalog-info.yaml'
  output:
    links:
      - title: Repository
        url: ${{ steps['publish'].output.remoteUrl }}
      - title: Open in Catalog
        icon: catalog
        entityRef: ${{ steps['register'].output.entityRef }}

View file

@ -114,6 +114,8 @@ data:
host: gitea.{{{ .Env.DOMAIN }}}
username: ${GITEA_USERNAME}
password: ${GITEA_PASSWORD}
- baseUrl: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live
host: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live
# github:
# - host: github.com
# apps:

View file

@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: openbao
  name: external-secrets-role
rules:
  # read-only access to Secrets in the openbao namespace
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]

View file

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: external-secrets-rolebinding
  namespace: openbao
subjects:
  # grants the external-secrets controller's ServiceAccount the
  # external-secrets-role in the openbao namespace
  - kind: ServiceAccount
    name: external-secrets
    namespace: external-secrets
roleRef:
  kind: Role
  name: external-secrets-role
  apiGroup: rbac.authorization.k8s.io

View file

@ -0,0 +1,20 @@
# cluster-store.yaml
apiVersion: external-secrets.io/v1beta1
kind: SecretStore # Kubernetes resource type
metadata:
  name: bao-backend # resource name
  namespace: openbao
spec:
  provider:
    vault: # specifies vault as the provider
      # server: "http://10.244.0.28:8200" # how to map it dynamically?
      server: "http://openbao.openbao.svc.cluster.local:8200"
      path: "data" # path for accessing the secrets
      version: "v1" # Vault API version
      auth:
        tokenSecretRef:
          name: "vault-token" # Use a secret called vault-token
          key: "token" # THIS REFERENCES THE INITIAL TOKEN NOW SAVED AS A K8 SECRET
# openbao-0.openbao.pod.cluster.local
# 10.96.59.250:8200

View file

@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: fibonacci-service
  namespace: fibonacci-app
spec:
  ingressClassName: nginx
  rules:
    - host: {{{ .Env.DOMAIN }}}
      http:
        paths:
          - backend:
              service:
                name: fibonacci-service
                port:
                  number: 9090
            path: /fibonacci
            pathType: Prefix

View file

@ -0,0 +1,30 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: keycloak-ingress-localhost
  namespace: keycloak
  annotations:
    # create late in the sync so keycloak is already up
    argocd.argoproj.io/sync-wave: "100"
spec:
  ingressClassName: "nginx"
  rules:
    - host: localhost
      http:
        paths:
          - path: /keycloak
            pathType: ImplementationSpecific
            backend:
              service:
                name: keycloak
                port:
                  name: http
    - host: {{{ .Env.DOMAIN }}}
      http:
        paths:
          - path: /keycloak
            pathType: ImplementationSpecific
            backend:
              service:
                name: keycloak
                port:
                  name: http

View file

@ -1,6 +1,17 @@
server:
dev:
enabled: true
postStart:
- sh
- -c
- |
sleep 10
bao operator init >> /tmp/init.txt
cat /tmp/init.txt | grep "Key " | awk '{print $NF}' | xargs -I{} bao operator unseal {}
echo $(grep "Initial Root Token:" /tmp/init.txt | awk '{print $NF}')| cat > /openbao/data/initial_token.txt
echo $(grep "Unseal Key 1:" /tmp/init.txt | awk '{print $NF}')| cat > /openbao/data/unseal_key1.txt
echo $(grep "Unseal Key 2:" /tmp/init.txt | awk '{print $NF}')| cat > /openbao/data/unseal_key2.txt
echo $(grep "Unseal Key 3:" /tmp/init.txt | awk '{print $NF}')| cat > /openbao/data/unseal_key3.txt
echo $(grep "Unseal Key 4:" /tmp/init.txt | awk '{print $NF}')| cat > /openbao/data/unseal_key4.txt
echo $(grep "Unseal Key 5:" /tmp/init.txt | awk '{print $NF}')| cat > /openbao/data/unseal_key5.txt
rm /tmp/init.txt
ui:
enabled: true
enabled: true