Merge branch 'development' into IPCEICIS-2293_oidc_in_forgejo

franz.germann 2025-04-09 12:15:41 +02:00
commit 951dcafc28
32 changed files with 449 additions and 140 deletions

.gitignore (vendored, new file)
View file

@@ -0,0 +1 @@
+/.history

View file

@@ -16,9 +16,12 @@ spec:
    name: in-cluster
    namespace: argocd
  sources:
-    - repoURL: https://github.com/argoproj/argo-helm
+    - repoURL: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/argocd-helm.git
      path: charts/argo-cd
-      targetRevision: argo-cd-7.7.5
+      # TODO: RIRE Can be updated when https://github.com/argoproj/argo-cd/issues/20790 is fixed and merged
+      # As logout causes problems, it is suggested to switch from path-based routing to a dedicated argocd domain,
+      # similar to the CNOE Amazon reference implementation and, in our case, Forgejo
+      targetRevision: argo-cd-7.8.14-depends
      helm:
        valueFiles:
          - $values/stacks/core/argocd/values.yaml

View file

@@ -5,6 +5,7 @@ configs:
  params:
    server.insecure: true
    server.basehref: /argocd
+    server.rootpath: /argocd
  cm:
    application.resourceTrackingMethod: annotation
    timeout.reconciliation: 60s
@@ -20,6 +21,7 @@ configs:
      clusters:
        - "*"
    accounts.provider-argocd: apiKey
+    url: https://{{{ .Env.DOMAIN }}}/argocd
  rbac:
    policy.csv: 'g, provider-argocd, role:admin'

View file

@@ -1,9 +0,0 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
  name: provider-argocd
spec:
  package: xpkg.upbound.io/crossplane-contrib/provider-argocd:v0.9.1
  packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
  revisionActivationPolicy: Automatic # Otherwise our Provider never gets activated & healthy
  revisionHistoryLimit: 1

View file

@@ -1,9 +0,0 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
  name: provider-kind
spec:
  package: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/provider-kind:v0.1.0
  packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
  revisionActivationPolicy: Automatic # Otherwise our Provider never gets activated & healthy
  revisionHistoryLimit: 1

View file

@@ -1,9 +0,0 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
  name: provider-shell
spec:
  package: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/provider-shell:v0.1.1
  packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
  revisionActivationPolicy: Automatic # Otherwise our Provider never gets activated & healthy
  revisionHistoryLimit: 1

View file

@@ -28,8 +28,19 @@ spec:
      # https://forgejo.org/docs/v1.21/admin/actions/#offline-registration
      initContainers:
        - name: runner-register
-          image: code.forgejo.org/forgejo/runner:6.0.1
-          command: ["forgejo-runner", "register", "--no-interactive", "--token", $(RUNNER_SECRET), "--name", $(RUNNER_NAME), "--instance", $(FORGEJO_INSTANCE_URL), "--labels", "docker:docker://node:20-bookworm,ubuntu-22.04:docker://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/catthehackerubuntu:act-22.04"]
+          image: code.forgejo.org/forgejo/runner:6.3.1
+          command:
+            - "forgejo-runner"
+            - "register"
+            - "--no-interactive"
+            - "--token"
+            - $(RUNNER_SECRET)
+            - "--name"
+            - $(RUNNER_NAME)
+            - "--instance"
+            - $(FORGEJO_INSTANCE_URL)
+            - "--labels"
+            - "docker:docker://node:20-bookworm,ubuntu-22.04:docker://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/catthehackerubuntu:act-22.04,ubuntu-latest:docker://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/catthehackerubuntu:act-22.04"
          env:
            - name: RUNNER_NAME
              valueFrom:
@@ -47,7 +58,7 @@ spec:
            mountPath: /data
      containers:
        - name: runner
-          image: code.forgejo.org/forgejo/runner:6.0.1
+          image: code.forgejo.org/forgejo/runner:6.3.1
          command:
            - "sh"
            - "-c"
@@ -83,7 +94,7 @@ spec:
            - name: runner-data
              mountPath: /data
        - name: daemon
-          image: docker:27.4.1-dind
+          image: docker:28.0.4-dind
          env:
            - name: DOCKER_TLS_CERTDIR
              value: /certs

View file

@@ -0,0 +1,13 @@
gitea:
  config:
    oauth2_client:
      ENABLE_AUTO_REGISTRATION: true
      ACCOUNT_LINKING: auto
  oauth:
    - name: 'Keycloak'
      provider: 'openidConnect'
      # key: 'forgejo'
      # secret: 'uWEGALJKmNyUojJaK5LAK0w4OCEEDpDu'
      existingSecret: auth-generic-oauth-secret
      autoDiscoverUrl: 'https://{{{ .Env.DOMAIN }}}/keycloak/realms/cnoe/.well-known/openid-configuration'
      # admin-group: is to specify which keycloak group has forgejo admin permissions

View file

@@ -16,9 +16,9 @@ spec:
    name: in-cluster
    namespace: gitea
  sources:
-    - repoURL: https://code.forgejo.org/forgejo-helm/forgejo-helm.git
+    - repoURL: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/forgejo-helm.git
      path: .
-      targetRevision: v10.1.1
+      targetRevision: v11.0.5-depends
      helm:
        valueFiles:
          - $values/stacks/core/forgejo/values.yaml

View file

@@ -30,14 +30,12 @@ gitea:
    oauth2_client:
      ENABLE_AUTO_REGISTRATION: true
      ACCOUNT_LINKING: auto
-    # oauth:
-    #   - name: 'Keycloak'
-    #     provider: 'openidConnect'
-    #     # key: 'forgejo'
-    #     # secret: 'uWEGALJKmNyUojJaK5LAK0w4OCEEDpDu'
-    #     existingSecret: forgejo-oidc
-    #     autoDiscoverUrl: 'https://{{{ .Env.DOMAIN }}}/keycloak/realms/cnoe/.well-known/openid-configuration'
-    #     # admin-group: is to specify which keycloak group has forgejo admin permissions
+    mailer:
+      ENABLED: true
+      FROM: forgejo@{{{ .Env.DOMAIN_GITEA }}}
+      PROTOCOL: smtp
+      SMTP_ADDR: mailhog.mailhog.svc.cluster.local
+      SMTP_PORT: 1025

  service:
    ssh:
@@ -64,3 +62,4 @@ forgejo:
      - docker:docker://node:16-bullseye
      - self-hosted:docker://ghcr.io/catthehacker/ubuntu:act-22.04
      - ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04
+      - ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-22.04

View file

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: alloy
  namespace: monitoring
spec:
  ingressClassName: nginx
  rules:
    - host: {{{ .Env.DOMAIN }}}
      http:
        paths:
          - backend:
              service:
                name: alloy
                port:
                  number: 12345
            path: /alloy
            pathType: Prefix

View file

@@ -4,8 +4,6 @@ metadata:
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: HTTP
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
-    nginx.ingress.kubernetes.io/use-regex: "true"
    {{{ if eq .Env.CLUSTER_TYPE "osc" }}}
    dns.gardener.cloud/class: garden
    dns.gardener.cloud/dnsnames: {{{ .Env.DOMAIN }}}
@@ -24,8 +22,8 @@ spec:
              name: argocd-server
              port:
                number: 80
-          path: /argocd(/|$)(.*)
-          pathType: ImplementationSpecific
+          path: /argocd
+          pathType: Prefix
  tls:
    - hosts:
        - {{{ .Env.DOMAIN }}}

View file

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: mailhog
  namespace: mailhog
spec:
  ingressClassName: nginx
  rules:
    - host: {{{ .Env.DOMAIN }}}
      http:
        paths:
          - backend:
              service:
                name: mailhog
                port:
                  number: 8025
            path: /mailhog
            pathType: Prefix

View file

@@ -16,9 +16,9 @@ spec:
    name: in-cluster
    namespace: ingress-nginx
  sources:
-    - repoURL: https://github.com/kubernetes/ingress-nginx
+    - repoURL: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/ingress-nginx-helm.git
      path: charts/ingress-nginx
-      targetRevision: helm-chart-4.11.3
+      targetRevision: helm-chart-4.12.1-depends
      helm:
        valueFiles:
          - $values/stacks/core/ingress-nginx/values.yaml

View file

@@ -1,7 +1,7 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
-  name: promtail
+  name: alloy
  namespace: argocd
  labels:
    env: dev
@@ -18,12 +18,12 @@ spec:
    name: in-cluster
    namespace: monitoring
  sources:
-    - repoURL: https://github.com/grafana/helm-charts
-      path: charts/promtail
+    - repoURL: https://github.com/grafana/alloy.git
+      path: operations/helm/charts/alloy
      targetRevision: HEAD
      helm:
        valueFiles:
-          - $values/stacks/monitoring/promtail/values.yaml
+          - $values/stacks/monitoring/alloy/values.yaml
    - repoURL: https://{{{ .Env.DOMAIN_GITEA }}}/giteaAdmin/edfbuilder
      targetRevision: HEAD
      ref: values

View file

@@ -0,0 +1,78 @@
alloy:
  create: false
  name: alloy-config
  key: config.alloy
  uiPathPrefix: "/alloy"
  configMap:
    content: |-
      logging {
        level = "info"
        format = "logfmt"
      }

      loki.write "local_loki" {
        endpoint {
          url = "http://loki-loki-distributed-gateway/loki/api/v1/push"
        }
      }

      discovery.kubernetes "pod" {
        role = "pod"
      }

      discovery.kubernetes "nodes" {
        role = "node"
      }

      discovery.kubernetes "services" {
        role = "service"
      }

      discovery.kubernetes "endpoints" {
        role = "endpoints"
      }

      discovery.kubernetes "endpointslices" {
        role = "endpointslice"
      }

      discovery.kubernetes "ingresses" {
        role = "ingress"
      }

      discovery.relabel "pod_logs" {
        targets = discovery.kubernetes.pod.targets

        rule {
          source_labels = ["__meta_kubernetes_namespace"]
          action = "replace"
          target_label = "namespace"
        }

        rule {
          source_labels = ["__meta_kubernetes_pod_name"]
          action = "replace"
          target_label = "pod"
        }

        rule {
          source_labels = ["__meta_kubernetes_pod_node_name"]
          action = "replace"
          target_label = "node"
        }

        rule {
          source_labels = ["__meta_kubernetes_pod_container_name"]
          action = "replace"
          target_label = "container"
        }
      }

      loki.source.kubernetes "all_pod_logs" {
        targets = discovery.relabel.pod_logs.output
        forward_to = [loki.write.local_loki.receiver]
      }

View file

@@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kube-prometheus-sso
  namespace: argocd
  labels:
    env: dev
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://{{{ .Env.DOMAIN_GITEA }}}/giteaAdmin/edfbuilder
    targetRevision: HEAD
    path: "stacks/monitoring/kube-prometheus-sso"
  destination:
    server: "https://kubernetes.default.svc"
    namespace: monitoring
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      selfHeal: true
    retry:
      limit: -1
      backoff:
        duration: 15s
        factor: 1
        maxDuration: 15s

View file

@@ -0,0 +1,21 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: auth-generic-oauth-secret
  namespace: monitoring
spec:
  secretStoreRef:
    name: keycloak
    kind: ClusterSecretStore
  refreshInterval: "0"
  target:
    name: auth-generic-oauth-secret
    template:
      engineVersion: v2
      data:
        client_secret: "{{.GRAFANA_CLIENT_SECRET}}"
  data:
    - secretKey: GRAFANA_CLIENT_SECRET
      remoteRef:
        key: keycloak-clients
        property: GRAFANA_CLIENT_SECRET

View file

@@ -15,6 +15,12 @@ spec:
    syncOptions:
      - CreateNamespace=true
      - ServerSideApply=true # do not copy metadata, since (because of its large size) it can lead to sync failure
+    retry:
+      limit: -1
+      backoff:
+        duration: 15s
+        factor: 1
+        maxDuration: 15s
  destination:
    name: in-cluster
    namespace: monitoring

View file

@@ -110,12 +110,12 @@ data:
            "uid": "P8E80F9AEF21F6940"
          },
          "editorMode": "builder",
-          "expr": "{container=\"promtail\"} |= ``",
+          "expr": "{container=\"alloy\"} |= ``",
          "queryType": "range",
          "refId": "A"
        }
      ],
-      "title": "Logs: Container promtail",
+      "title": "Logs: Container alloy",
      "type": "logs"
    },
    {

View file

@@ -33,6 +33,32 @@ grafana:
    domain: {{{ .Env.DOMAIN }}}
    root_url: "%(protocol)s://%(domain)s/grafana"
    serve_from_sub_path: true
+    auth:
+      disable_login: true
+      disable_login_form: true
+    auth.generic_oauth:
+      enabled: true
+      name: Keycloak-OAuth
+      allow_sign_up: true
+      use_refresh_token: true
+      client_id: grafana
+      client_secret: $__file{/etc/secrets/auth_generic_oauth/client_secret}
+      scopes: openid email profile offline_access roles
+      email_attribute_path: email
+      login_attribute_path: username
+      name_attribute_path: full_name
+      auth_url: https://{{{ .Env.DOMAIN }}}/keycloak/realms/cnoe/protocol/openid-connect/auth
+      token_url: https://{{{ .Env.DOMAIN }}}/keycloak/realms/cnoe/protocol/openid-connect/token
+      api_url: https://{{{ .Env.DOMAIN }}}/keycloak/realms/cnoe/protocol/openid-connect/userinfo
+      redirect_uri: http://{{{ .Env.DOMAIN }}}/grafana/login/generic_oauth
+      role_attribute_path: "contains(groups[*], 'admin') && 'Admin' || contains(groups[*], 'editor') && 'Editor' || 'Viewer'"
+  extraSecretMounts:
+    - name: auth-generic-oauth-secret-mount
+      secretName: auth-generic-oauth-secret
+      defaultMode: 0440
+      mountPath: /etc/secrets/auth_generic_oauth
+      readOnly: true
  serviceMonitor:
    # If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator

View file

@@ -1,45 +0,0 @@
# -- Overrides the chart's name
nameOverride: null
# -- Overrides the chart's computed fullname
fullnameOverride: null

global:
  # -- Allow parent charts to override registry hostname
  imageRegistry: ""
  # -- Allow parent charts to override registry credentials
  imagePullSecrets: []

daemonset:
  # -- Deploys Promtail as a DaemonSet
  enabled: true
  autoscaling:
    # -- Creates a VerticalPodAutoscaler for the daemonset
    enabled: false

deployment:
  # -- Deploys Promtail as a Deployment
  enabled: false

config:
  enabled: true
  logLevel: info
  logFormat: logfmt
  serverPort: 3101
  clients:
    - url: http://loki-loki-distributed-gateway/loki/api/v1/push
  scrape_configs:
    - job_name: authlog
      static_configs:
        - targets:
            - authlog
          labels:
            job: authlog
            __path__: /logs/auth.log
    - job_name: syslog
      static_configs:
        - targets:
            - syslog
          labels:
            job: syslog
            __path__: /logs/syslog

View file

@@ -23,3 +23,7 @@ spec:
      selfHeal: true
    retry:
      limit: -1
+      backoff:
+        duration: 15s
+        factor: 1
+        maxDuration: 15s

View file

@@ -1,32 +0,0 @@
# This workflow will build a Java project with Gradle, and cache/restore any dependencies to improve the workflow execution time
# For more information see: https://docs.github.com/en/actions/use-cases-and-examples/building-and-testing/building-and-testing-java-with-gradle

name: Java CI with Gradle

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        java: [ '17' ]
    steps:
      - uses: actions/checkout@v4
      - name: Set up JDK {% raw %}${{matrix.java}}{% endraw %}
        uses: https://github.com/actions/setup-java@v4
        with:
          java-version: '{% raw %}${{matrix.java}}{% endraw %}'
          distribution: 'adopt'
          cache: maven
      - name: Setup Gradle
        uses: https://github.com/gradle/actions/setup-gradle@v4
      - name: Build with Gradle
        run: ./gradlew build

View file

@@ -28,12 +28,12 @@ jobs:
          distribution: 'adopt'
          cache: maven
      - name: Build with Maven Wrapper
-        run: ./mvnw -B verify
+        run: ./mvnw -B -DskipTests verify
      - name: Build image
        #run: ./mvnw spring-boot:build-image # the original image build
        run: |
          export CONTAINER_REPO=$(echo {% raw %}${{ env.GITHUB_REPOSITORY }}{% endraw %} | tr '[:upper:]' '[:lower:]')
-          ./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:build -Djib.allowInsecureRegistries=true -Dimage={{{ .Env.DOMAIN_GITEA }}}/${CONTAINER_REPO}:latest -Djib.to.auth.username={% raw %}${{ github.actor }}{% endraw %} -Djib.to.auth.password={% raw %}${{ secrets.PACKAGES_TOKEN }}{% endraw %}
+          ./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:build -Djib.allowInsecureRegistries=true -Dimage={{{ .Env.DOMAIN_GITEA }}}/${CONTAINER_REPO}:latest -Djib.to.auth.username={% raw %}${{ secrets.PACKAGES_USER }}{% endraw %} -Djib.to.auth.password={% raw %}${{ secrets.PACKAGES_TOKEN }}{% endraw %} -Djib.from.platforms=linux/arm64,linux/amd64
      - name: Build image as tar
        run: |
          ./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:buildTar -Djib.allowInsecureRegistries=true
@@ -57,7 +57,11 @@ jobs:
          NODE_TLS_REJECT_UNAUTHORIZED: 0 # This is necessary due to self signed certs for forgejo, proper setups can skip this
      - name: install trivy from deb package
        run: |
-          wget -O trivy.deb https://github.com/aquasecurity/trivy/releases/download/v0.58.0/trivy_0.58.0_Linux-64bit.deb
+          if [[ "$(uname -m)" == "x86_64" ]]; then
+            wget -O trivy.deb https://github.com/aquasecurity/trivy/releases/download/v0.58.0/trivy_0.58.0_Linux-64bit.deb
+          else
+            wget -O trivy.deb https://github.com/aquasecurity/trivy/releases/download/v0.58.0/trivy_0.58.0_Linux-ARM64.deb
+          fi
          DEBIAN_FRONTEND=noninteractive dpkg -i trivy.deb
      - name: scan the image
        run: trivy image --input jib-image.tar

View file

@@ -23,3 +23,7 @@ spec:
      selfHeal: true
    retry:
      limit: -1
+      backoff:
+        duration: 15s
+        factor: 1
+        maxDuration: 15s

View file

@@ -255,6 +255,8 @@ spec:
              value: debug
            - name: NODE_TLS_REJECT_UNAUTHORIZED
              value: "0"
+            - name: NODE_OPTIONS
+              value: "--no-node-snapshot"
          envFrom:
            - secretRef:
                name: backstage-env-vars
@@ -262,7 +264,7 @@ spec:
                name: gitea-credentials
            - secretRef:
                name: argocd-credentials
-          image: ghcr.io/cnoe-io/backstage-app:9232d633b2698fffa6d0a73b715e06640d170162
+          image: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/backstage-edp:development
          name: backstage
          ports:
            - containerPort: 7007
@@ -386,7 +388,7 @@ spec:
        KEYCLOAK_NAME_METADATA: https://{{{ .Env.DOMAIN }}}:443/keycloak/realms/cnoe/.well-known/openid-configuration
        KEYCLOAK_CLIENT_SECRET: "{{.BACKSTAGE_CLIENT_SECRET}}"
        ARGOCD_AUTH_TOKEN: "argocd.token={{.ARGOCD_SESSION_TOKEN}}"
-        ARGO_CD_URL: 'https://argocd-server.argocd.svc.cluster.local/api/v1/'
+        ARGO_CD_URL: 'https://{{{ .Env.DOMAIN }}}/argocd/api/v1/'
      data:
        - secretKey: ARGOCD_SESSION_TOKEN
          remoteRef:

View file

@@ -208,6 +208,34 @@ data:
      "webOrigins": [
        "/*"
      ]
+  grafana-client-payload.json: |
+    {
+      "clientId": "grafana",
+      "name": "Grafana Client",
+      "description": "Used for Grafana SSO",
+      "rootUrl": "https://{{{ .Env.DOMAIN }}}/grafana",
+      "adminUrl": "https://{{{ .Env.DOMAIN }}}/grafana",
+      "baseUrl": "https://{{{ .Env.DOMAIN }}}/grafana",
+      "alwaysDisplayInConsole": false,
+      "redirectUris": [
+        "http://{{{ .Env.DOMAIN }}}/grafana/*"
+      ],
+      "webOrigins": [
+        "https://{{{ .Env.DOMAIN }}}/grafana"
+      ],
+      "standardFlowEnabled": true,
+      "implicitFlowEnabled": false,
+      "directAccessGrantsEnabled": true,
+      "serviceAccountsEnabled": false,
+      "publicClient": false,
+      "frontchannelLogout": true,
+      "protocol": "openid-connect",
+      "attributes": {
+        "saml_idp_initiated_sso_url_name": "",
+        "oidc.ciba.grant.enabled": "false",
+        "oauth2.device.authorization.grant.enabled": "false"
+      },
      "defaultClientScopes": [
        "web-origins",
        "acr",
@@ -285,7 +313,11 @@ spec:
            fi
            set -e

-            curl -sS -LO "https://dl.k8s.io/release/v1.28.3//bin/linux/amd64/kubectl"
+            if [[ "$(uname -m)" == "x86_64" ]]; then
+              curl -sS -LO "https://dl.k8s.io/release/v1.28.3//bin/linux/amd64/kubectl"
+            else
+              curl -sS -LO "https://dl.k8s.io/release/v1.28.3//bin/linux/arm64/kubectl"
+            fi
            chmod +x kubectl

            echo "creating cnoe realm and groups"
@@ -369,6 +401,23 @@ spec:
              -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
              -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')

+            echo "creating Grafana client"
+            curl -sS -H "Content-Type: application/json" \
+              -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
+              -X POST --data @/var/config/grafana-client-payload.json \
+              ${KEYCLOAK_URL}/admin/realms/cnoe/clients
+
+            CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
+              -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
+              -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "grafana") | .id')
+            CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
+            curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
+
+            GRAFANA_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
+              -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
+              -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
+
            echo "creating Backstage client"
            curl -sS -H "Content-Type: application/json" \
              -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
@@ -415,7 +464,7 @@ spec:
            ARGOCD_PASSWORD=$(./kubectl -n argocd get secret argocd-initial-admin-secret -o go-template='{{.data.password | base64decode }}')

-            ARGOCD_SESSION_TOKEN=$(curl -k -sS http://argocd-server.argocd.svc.cluster.local:443/api/v1/session -H 'Content-Type: application/json' -d "{\"username\":\"admin\",\"password\":\"${ARGOCD_PASSWORD}\"}" | jq -r .token)
+            ARGOCD_SESSION_TOKEN=$(curl -sS https://{{{ .Env.DOMAIN }}}/argocd/api/v1/session -H 'Content-Type: application/json' -d "{\"username\":\"admin\",\"password\":\"${ARGOCD_PASSWORD}\"}" | jq -r .token)

            echo \
              "apiVersion: v1
@@ -432,6 +481,8 @@ spec:
              BACKSTAGE_CLIENT_ID: backstage
              FORGEJO_CLIENT_SECRET: ${FORGEJO_CLIENT_SECRET}
              FORGEJO_CLIENT_ID: forgejo
+              GRAFANA_CLIENT_SECRET: ${GRAFANA_CLIENT_SECRET}
+              GRAFANA_CLIENT_ID: grafana
            " > /tmp/secret.yaml
            ./kubectl apply -f /tmp/secret.yaml

View file

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: mailhog
  namespace: argocd
  labels:
    env: dev
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://{{{ .Env.DOMAIN_GITEA }}}/giteaAdmin/edfbuilder
    targetRevision: HEAD
    path: "stacks/ref-implementation/mailhog"
  destination:
    server: "https://kubernetes.default.svc"
    namespace: mailhog
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      selfHeal: true
    retry:
      limit: -1

View file

@@ -0,0 +1,54 @@
# Mailhog

[MailHog is an email testing tool for developers](https://github.com/mailhog/MailHog).

## In-cluster SMTP service

You can send ESMTP mails inside the cluster to `mailhog.mailhog.svc.cluster.local`, standard port `1025`, as defined in the service manifest:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: mailhog
spec:
  ports:
    - name: smtp
      port: 1025
```

## Ingress

Mailhog offers both a Web UI and an API at `https://{{{ .Env.DOMAIN }}}/mailhog`.
The ingress definition is in `stacks/core/ingress-apps/mailhog.yaml` (BTW, why isn't this ingress file here in this folder?), routing to the mailhog service:

```yaml
spec:
  rules:
    - host: {{{ .Env.DOMAIN }}}
      http:
        paths:
          - backend:
              ...
            path: /mailhog
```

## API

For usage of the API see https://github.com/mailhog/MailHog/blob/master/docs/APIv2.md
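A quick smoke test of the v2 API from inside the cluster could look like this (a sketch; the `/mailhog` prefix in the API path is an assumption derived from `MH_UI_WEB_PATH=mailhog`, adjust it if the API is served from the root):

```bash
# count the captured messages via the MailHog v2 API
# (the /mailhog path prefix is assumed from MH_UI_WEB_PATH=mailhog)
curl -s http://mailhog.mailhog.svc.cluster.local:8025/mailhog/api/v2/messages | jq '.total'
```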
## Tests
```bash
kubectl run busybox --rm -it --image=busybox -- /bin/sh

# inside busybox
wget -O- http://mailhog.mailhog.svc.cluster.local:8025/mailhog

# check the smtp port
nc -zv mailhog.mailhog.svc.cluster.local 1025

# to send an ESMTP mail, first install swaks
swaks --to test@example.com --from test@example.com --server mailhog:1025 --data "Subject: Test-Mail\n\nThis is a test mail."
```

View file

@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mailhog-deployment
  namespace: mailhog
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mailhog
  template:
    metadata:
      labels:
        app: mailhog
    spec:
      containers:
        - name: mailhog
          image: mailhog/mailhog
          env:
            - name: MH_UI_WEB_PATH # set this to the same value as in the ingress stacks/core/ingress-apps/mailhog.yaml
              value: mailhog
          ports:
            - containerPort: 1025
              name: smtp
            - containerPort: 8025
              name: http
          resources:
            requests:
              memory: "64Mi"
              cpu: "50m"
            limits:
              memory: "128Mi"
              cpu: "100m"

View file

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: mailhog
spec:
  selector:
    app: mailhog
  ports:
    - name: smtp
      port: 1025
    - name: http
      port: 8025
  type: ClusterIP