Compare commits


1 commit

Author: Richard Robert Reitz
SHA1: 1813371a88
Message: Added hydrated version of nginx-ingress, argocd and forgejo
Date: 2025-02-19 20:40:59 +01:00
111 changed files with 30162 additions and 3853 deletions

.gitignore (vendored)

@@ -1 +0,0 @@
/.history

@@ -1,6 +1,6 @@
# edpbuilder stacks
This repository contains the building blocks to instantiate Internal Developer Platforms.
This repository contains the building blocks to instanciate Internal Developer Platform's.
### Install edpbuilder

@@ -12,8 +12,8 @@ spec:
name: in-cluster
namespace: argocd
source:
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/registry"
repoURL: "https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}"
path: registry
repoURL: 'https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder'
targetRevision: HEAD
project: default
syncPolicy:

@@ -12,8 +12,8 @@ spec:
name: in-cluster
namespace: argocd
source:
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/core"
repoURL: "https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}"
path: stacks/core
repoURL: 'https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder'
targetRevision: HEAD
project: default
syncPolicy:

@@ -12,8 +12,8 @@ spec:
name: in-cluster
namespace: argocd
source:
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/local-backup"
repoURL: "https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}"
path: stacks/local-backup
repoURL: 'https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder'
targetRevision: HEAD
project: default
syncPolicy:

@@ -1,7 +1,7 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: otc
name: monitoring
namespace: argocd
labels:
env: dev
@@ -12,8 +12,8 @@ spec:
name: in-cluster
namespace: argocd
source:
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/otc"
repoURL: "https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}"
path: stacks/monitoring
repoURL: 'https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder'
targetRevision: HEAD
project: default
syncPolicy:

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: observability-client
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability-client"
repoURL: "https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}"
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

@@ -12,8 +12,8 @@ spec:
name: in-cluster
namespace: argocd
source:
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation"
repoURL: "https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}"
path: stacks/ref-implementation
repoURL: 'https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder'
targetRevision: HEAD
project: default
syncPolicy:

@@ -1,7 +1,7 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forgejo
name: second-cluster
namespace: argocd
labels:
env: dev
@@ -12,8 +12,8 @@ spec:
name: in-cluster
namespace: argocd
source:
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/forgejo"
repoURL: "https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}"
path: stacks/second-cluster
repoURL: 'https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder'
targetRevision: HEAD
project: default
syncPolicy:

@@ -12,24 +12,16 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: argocd
sources:
- repoURL: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/argocd-helm.git
- repoURL: https://github.com/argoproj/argo-helm
path: charts/argo-cd
# TODO: RIRE Can be updated when https://github.com/argoproj/argo-cd/issues/20790 is fixed and merged
# As logout causes problems, it is suggested to switch from path-based routing to a dedicated argocd domain,
# similar to the CNOE Amazon reference implementation and, in our case, Forgejo
targetRevision: argo-cd-7.8.14-depends
targetRevision: argo-cd-7.7.5
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/core/argocd/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
- $values/stacks/core/argocd/values.yaml
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/core/argocd/manifests"

File diff suppressed because it is too large

@@ -1,9 +1,10 @@
global:
domain: {{{ .Env.DOMAIN_ARGOCD }}}
domain: {{{ .Env.DOMAIN }}}
configs:
params:
server.insecure: true
server.basehref: /argocd
cm:
application.resourceTrackingMethod: annotation
timeout.reconciliation: 60s
@@ -19,7 +20,6 @@ configs:
clusters:
- "*"
accounts.provider-argocd: apiKey
url: https://{{{ .Env.DOMAIN_ARGOCD }}}
rbac:
policy.csv: 'g, provider-argocd, role:admin'

@@ -0,0 +1,23 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane-compositions
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: crossplane-system
source:
path: stacks/core/crossplane-compositions
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
directory:
recurse: true

@@ -0,0 +1,30 @@
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
name: edfbuilders.edfbuilder.crossplane.io
spec:
connectionSecretKeys:
- kubeconfig
group: edfbuilder.crossplane.io
names:
kind: EDFBuilder
listKind: EDFBuilderList
plural: edfbuilders
singular: edfbuilders
versions:
- name: v1alpha1
served: true
referenceable: true
schema:
openAPIV3Schema:
description: A EDFBuilder is a composite resource that represents a K8S Cluster with edfbuilder Installed
type: object
properties:
spec:
type: object
properties:
repoURL:
type: string
description: URL to ArgoCD stack of stacks repo
required:
- repoURL
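
For illustration, a minimal composite resource satisfying this definition might look as follows (name and URL are placeholders):

apiVersion: edfbuilder.crossplane.io/v1alpha1
kind: EDFBuilder
metadata:
  name: example-edfbuilder                                   # placeholder name
spec:
  # required by the schema above: URL to the ArgoCD stack-of-stacks repo
  repoURL: https://gitea.example.com/giteaAdmin/edfbuilder   # placeholder URL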

@@ -0,0 +1,23 @@
{{{ if eq .Env.CLUSTER_TYPE "kind" }}}
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane-providers
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: crossplane-system
source:
path: stacks/core/crossplane-providers
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
{{{ end }}}

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Function
metadata:
name: crossplane-contrib-function-patch-and-transform
spec:
package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Provider never gets active & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,14 @@
apiVersion: argocd.crossplane.io/v1alpha1
kind: ProviderConfig
metadata:
name: argocd-provider
spec:
serverAddr: argocd-server.argocd.svc.cluster.local:80
insecure: true
plainText: true
credentials:
source: Secret
secretRef:
namespace: crossplane-system
name: argocd-credentials
key: authToken
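
This ProviderConfig expects a pre-created secret holding an Argo CD API token for the provider-argocd account enabled in the Argo CD values above. A minimal sketch of that secret, with a placeholder token:

apiVersion: v1
kind: Secret
metadata:
  name: argocd-credentials
  namespace: crossplane-system
type: Opaque
stringData:
  authToken: <argocd-api-token>   # placeholder; an API token generated for account provider-argocd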

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-argocd
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-argocd:v0.9.1
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Provider never gets active & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,14 @@
apiVersion: kind.crossplane.io/v1alpha1
kind: ProviderConfig
metadata:
name: kind-provider
spec:
credentials:
source: Secret
secretRef:
namespace: crossplane-system
name: kind-credentials
key: credentials
endpoint:
# the url is managed by crossplane-edfbuilder
url: https://DOCKER_HOST:SERVER_PORT/api/v1/kindserver

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-kind
spec:
package: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/provider-kind:v0.1.0
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Provider never gets active & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-shell
spec:
package: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/provider-shell:v0.1.1
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Provider never gets active & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,23 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: crossplane-system
source:
chart: crossplane
repoURL: https://charts.crossplane.io/stable
targetRevision: 1.18.0
helm:
releaseName: crossplane

@@ -0,0 +1,27 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forgejo
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: gitea
sources:
- repoURL: https://code.forgejo.org/forgejo-helm/forgejo-helm.git
path: .
targetRevision: v10.1.1
helm:
valueFiles:
- $values/stacks/core/forgejo/values.yaml
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values

@@ -0,0 +1,619 @@
---
# Source: forgejo/templates/gitea/config.yaml
apiVersion: v1
kind: Secret
metadata:
name: forgejo-inline-config
namespace: "gitea"
labels:
helm.sh/chart: forgejo-0.0.0
app: forgejo
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
app.kubernetes.io/version: "9.0.2"
version: "9.0.2"
app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
_generals_: |-
APP_NAME=Forgejo: Beyond coding. We forge.
RUN_MODE=prod
cache: |-
ADAPTER=memory
HOST=
database: DB_TYPE=sqlite3
indexer: ISSUE_INDEXER_TYPE=db
metrics: ENABLED=false
queue: |-
CONN_STR=
TYPE=level
repository: ROOT=/data/git/gitea-repositories
security: INSTALL_LOCK=true
server: |-
APP_DATA_PATH=/data
DOMAIN=gitea.runner.c-one-infra.de
ENABLE_PPROF=false
HTTP_PORT=3000
PROTOCOL=http
ROOT_URL=https://gitea.runner.c-one-infra.de:443
SSH_DOMAIN=gitea.runner.c-one-infra.de
SSH_LISTEN_PORT=2222
SSH_PORT=22
START_SSH_SERVER=true
session: |-
PROVIDER=memory
PROVIDER_CONFIG=
---
# Source: forgejo/templates/gitea/config.yaml
apiVersion: v1
kind: Secret
metadata:
name: forgejo
labels:
helm.sh/chart: forgejo-0.0.0
app: forgejo
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
app.kubernetes.io/version: "9.0.2"
version: "9.0.2"
app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
assertions: |
config_environment.sh: |-
#!/usr/bin/env bash
set -euo pipefail
function env2ini::log() {
printf "${1}\n"
}
function env2ini::read_config_to_env() {
local section="${1}"
local line="${2}"
if [[ -z "${line}" ]]; then
# skip empty line
return
fi
# 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
if [[ -z "${setting}" ]]; then
env2ini::log ' ! invalid setting'
exit 1
fi
local value=''
local regex="^${setting}(\s*)=(\s*)(.*)"
if [[ $line =~ $regex ]]; then
value="${BASH_REMATCH[3]}"
else
env2ini::log ' ! invalid setting'
exit 1
fi
env2ini::log " + '${setting}'"
if [[ -z "${section}" ]]; then
export "FORGEJO____${setting^^}=${value}" # '^^' makes the variable content uppercase
return
fi
local masked_section="${section//./_0X2E_}" # '//' instructs to replace all matches
masked_section="${masked_section//-/_0X2D_}"
export "FORGEJO__${masked_section^^}__${setting^^}=${value}" # '^^' makes the variable content uppercase
}
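# Worked example (assuming the mappings above): an inline section file named
# 'cache' containing 'ADAPTER=memory' is exported as FORGEJO__CACHE__ADAPTER=memory;
# a root-level '_generals_' setting such as 'RUN_MODE=prod' becomes
# FORGEJO____RUN_MODE=prod; dots and dashes in section names are masked, e.g.
# 'repository.pull-request' -> 'FORGEJO__REPOSITORY_0X2E_PULL_0X2D_REQUEST__...'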
function env2ini::reload_preset_envs() {
env2ini::log "Reloading preset envs..."
while read -r line; do
if [[ -z "${line}" ]]; then
# skip empty line
return
fi
# 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
if [[ -z "${setting}" ]]; then
env2ini::log ' ! invalid setting'
exit 1
fi
local value=''
local regex="^${setting}(\s*)=(\s*)(.*)"
if [[ $line =~ $regex ]]; then
value="${BASH_REMATCH[3]}"
else
env2ini::log ' ! invalid setting'
exit 1
fi
env2ini::log " + '${setting}'"
export "${setting^^}=${value}" # '^^' makes the variable content uppercase
done < "/tmp/existing-envs"
rm /tmp/existing-envs
}
function env2ini::process_config_file() {
local config_file="${1}"
local section="$(basename "${config_file}")"
if [[ $section == '_generals_' ]]; then
env2ini::log " [ini root]"
section=''
else
env2ini::log " ${section}"
fi
while read -r line; do
env2ini::read_config_to_env "${section}" "${line}"
done < <(awk 1 "${config_file}") # Helm .toYaml trims the trailing new line which breaks line processing; awk 1 ... adds it back while reading
}
function env2ini::load_config_sources() {
local path="${1}"
if [[ -d "${path}" ]]; then
env2ini::log "Processing $(basename "${path}")..."
while read -d '' configFile; do
env2ini::process_config_file "${configFile}"
done < <(find "${path}" -type l -not -name '..data' -print0)
env2ini::log "\n"
fi
}
function env2ini::generate_initial_secrets() {
# These environment variables will either be
# - overwritten with user defined values,
# - initially used to set up Forgejo
# Anyway, they won't harm existing app.ini files
export FORGEJO__SECURITY__INTERNAL_TOKEN=$(gitea generate secret INTERNAL_TOKEN)
export FORGEJO__SECURITY__SECRET_KEY=$(gitea generate secret SECRET_KEY)
export FORGEJO__OAUTH2__JWT_SECRET=$(gitea generate secret JWT_SECRET)
export FORGEJO__SERVER__LFS_JWT_SECRET=$(gitea generate secret LFS_JWT_SECRET)
env2ini::log "...Initial secrets generated\n"
}
# save existing envs prior to script execution. Necessary to keep order of
# preexisting and custom envs
env | (grep -e '^FORGEJO__' || [[ $? == 1 ]]) > /tmp/existing-envs
# MUST BE CALLED BEFORE OTHER CONFIGURATION
env2ini::generate_initial_secrets
env2ini::load_config_sources '/env-to-ini-mounts/inlines/'
env2ini::load_config_sources '/env-to-ini-mounts/additionals/'
# load existing envs to override auto generated envs
env2ini::reload_preset_envs
env2ini::log "=== All configuration sources loaded ===\n"
# safety to prevent rewrite of secret keys if an app.ini already exists
if [ -f ${GITEA_APP_INI} ]; then
env2ini::log 'An app.ini file already exists. To prevent overwriting secret keys, these settings are dropped and remain unchanged:'
env2ini::log ' - security.INTERNAL_TOKEN'
env2ini::log ' - security.SECRET_KEY'
env2ini::log ' - oauth2.JWT_SECRET'
env2ini::log ' - server.LFS_JWT_SECRET'
unset FORGEJO__SECURITY__INTERNAL_TOKEN
unset FORGEJO__SECURITY__SECRET_KEY
unset FORGEJO__OAUTH2__JWT_SECRET
unset FORGEJO__SERVER__LFS_JWT_SECRET
fi
environment-to-ini -o $GITEA_APP_INI
---
# Source: forgejo/templates/gitea/init.yaml
apiVersion: v1
kind: Secret
metadata:
name: forgejo-init
namespace: "gitea"
labels:
helm.sh/chart: forgejo-0.0.0
app: forgejo
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
app.kubernetes.io/version: "9.0.2"
version: "9.0.2"
app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
configure_gpg_environment.sh: |-
#!/usr/bin/env bash
set -eu
gpg --batch --import /raw/private.asc
init_directory_structure.sh: |-
#!/usr/bin/env bash
set -euo pipefail
set -x
mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure
mkdir -p "${GITEA_TEMP}"
chmod ug+rwx "${GITEA_TEMP}"
configure_gitea.sh: |-
#!/usr/bin/env bash
set -euo pipefail
echo '==== BEGIN GITEA CONFIGURATION ===='
{ # try
gitea migrate
} || { # catch
echo "Forgejo migrate might fail due to database connection...This init-container will try again in a few seconds"
exit 1
}
function configure_admin_user() {
local full_admin_list=$(gitea admin user list --admin)
local actual_user_table=''
# We might have distorted output due to warning logs, so we have to detect the actual user table by its headline and trim output above that line
local regex="(.*)(ID\s+Username\s+Email\s+IsActive.*)"
if [[ "${full_admin_list}" =~ $regex ]]; then
actual_user_table=$(echo "${BASH_REMATCH[2]}" | tail -n+2) # tail'ing to drop the table headline
else
# This code block should never be reached, as long as the output table header remains the same.
# If this code block is reached, the regex doesn't match anymore and we probably have to adjust this script.
echo "ERROR: 'configure_admin_user' was not able to determine the current list of admin users."
echo " Please review the output of 'gitea admin user list --admin' shown below."
echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
echo "DEBUG: Output of 'gitea admin user list --admin'"
echo "--"
echo "${full_admin_list}"
echo "--"
exit 1
fi
local ACCOUNT_ID=$(echo "${actual_user_table}" | grep -E "\s+${GITEA_ADMIN_USERNAME}\s+" | awk -F " " "{printf \$1}")
if [[ -z "${ACCOUNT_ID}" ]]; then
local -a create_args
create_args=(--admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email "gitea@local.domain")
if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = initialOnlyRequireReset ]]; then
create_args+=(--must-change-password=true)
else
create_args+=(--must-change-password=false)
fi
echo "No admin user '${GITEA_ADMIN_USERNAME}' found. Creating now..."
gitea admin user create "${create_args[@]}"
echo '...created.'
else
if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = keepUpdated ]]; then
echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist. Running update to sync password..."
local -a change_args
change_args=(--username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --must-change-password=false)
gitea admin user change-password "${change_args[@]}"
echo '...password sync done.'
else
echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist, but update mode is set to '${GITEA_ADMIN_PASSWORD_MODE}'. Skipping."
fi
fi
}
configure_admin_user
function configure_ldap() {
echo 'no ldap configuration... skipping.'
}
configure_ldap
function configure_oauth() {
echo 'no oauth configuration... skipping.'
}
configure_oauth
echo '==== END GITEA CONFIGURATION ===='
---
# Source: forgejo/templates/gitea/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-shared-storage
namespace: "gitea"
annotations:
helm.sh/resource-policy: keep
spec:
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 5Gi
---
# Source: forgejo/templates/gitea/http-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: forgejo-http
namespace: "gitea"
labels:
helm.sh/chart: forgejo-0.0.0
app: forgejo
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
app.kubernetes.io/version: "9.0.2"
version: "9.0.2"
app.kubernetes.io/managed-by: Helm
annotations:
{}
spec:
type: ClusterIP
clusterIP: None
ports:
- name: http
port: 3000
targetPort:
selector:
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
---
# Source: forgejo/templates/gitea/ssh-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: forgejo-ssh
namespace: "gitea"
labels:
helm.sh/chart: forgejo-0.0.0
app: forgejo
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
app.kubernetes.io/version: "9.0.2"
version: "9.0.2"
app.kubernetes.io/managed-by: Helm
annotations:
{}
spec:
type: NodePort
externalTrafficPolicy: Local
ports:
- name: ssh
port: 22
targetPort: 2222
protocol: TCP
nodePort: 32222
selector:
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
---
# Source: forgejo/templates/gitea/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: forgejo
namespace: "gitea"
annotations:
labels:
helm.sh/chart: forgejo-0.0.0
app: forgejo
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
app.kubernetes.io/version: "9.0.2"
version: "9.0.2"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
maxSurge: 100%
selector:
matchLabels:
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
template:
metadata:
annotations:
checksum/config: bd40d1abc2e2692fafe0821e9345e231261f194a19fbee5aef7dd9cdfc106596
labels:
helm.sh/chart: forgejo-0.0.0
app: forgejo
app.kubernetes.io/name: forgejo
app.kubernetes.io/instance: forgejo
app.kubernetes.io/version: "9.0.2"
version: "9.0.2"
app.kubernetes.io/managed-by: Helm
spec:
securityContext:
fsGroup: 1000
initContainers:
- name: init-directories
image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
imagePullPolicy: IfNotPresent
command: ["/usr/sbin/init_directory_structure.sh"]
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
volumeMounts:
- name: init
mountPath: /usr/sbin
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
securityContext:
{}
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
- name: init-app-ini
image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
imagePullPolicy: IfNotPresent
command: ["/usr/sbin/config_environment.sh"]
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
volumeMounts:
- name: config
mountPath: /usr/sbin
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
- name: inline-config-sources
mountPath: /env-to-ini-mounts/inlines/
securityContext:
{}
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
- name: configure-gitea
image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
command: ["/usr/sbin/configure_gitea.sh"]
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 1000
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: HOME
value: /data/gitea/git
- name: GITEA_ADMIN_USERNAME
valueFrom:
secretKeyRef:
key: username
name: gitea-credential
- name: GITEA_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: gitea-credential
- name: GITEA_ADMIN_PASSWORD_MODE
value: keepUpdated
volumeMounts:
- name: init
mountPath: /usr/sbin
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
terminationGracePeriodSeconds: 60
containers:
- name: forgejo
image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
imagePullPolicy: IfNotPresent
env:
# SSH Port values have to be set here as well for openssh configuration
- name: SSH_LISTEN_PORT
value: "2222"
- name: SSH_PORT
value: "22"
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TMPDIR
value: /tmp/gitea
- name: HOME
value: /data/gitea/git
ports:
- name: ssh
containerPort: 2222
- name: http
containerPort: 3000
livenessProbe:
failureThreshold: 10
initialDelaySeconds: 200
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
resources:
{}
securityContext:
{}
volumeMounts:
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
volumes:
- name: init
secret:
secretName: forgejo-init
defaultMode: 110
- name: config
secret:
secretName: forgejo
defaultMode: 110
- name: inline-config-sources
secret:
secretName: forgejo-inline-config
- name: temp
emptyDir: {}
- name: data
persistentVolumeClaim:
claimName: gitea-shared-storage

@@ -0,0 +1,55 @@
redis-cluster:
enabled: false
postgresql:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: true
size: 5Gi
test:
enabled: false
gitea:
admin:
existingSecret: gitea-credential
config:
database:
DB_TYPE: sqlite3
session:
PROVIDER: memory
cache:
ADAPTER: memory
queue:
TYPE: level
server:
DOMAIN: 'gitea.{{{ .Env.DOMAIN }}}'
ROOT_URL: 'https://gitea.{{{ .Env.DOMAIN }}}:443'
service:
ssh:
type: NodePort
nodePort: 32222
externalTrafficPolicy: Local
image:
pullPolicy: "IfNotPresent"
# Overrides the image tag whose default is the chart appVersion.
#tag: "8.0.3"
# Adds -rootless suffix to image name
rootless: true
forgejo:
runner:
enabled: true
image:
tag: latest
# replicas: 3
config:
runner:
labels:
- docker:docker://node:16-bullseye
- self-hosted:docker://ghcr.io/catthehacker/ubuntu:act-22.04
- ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04
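
The admin block above references a pre-existing gitea-credential secret; the rendered init containers earlier in this diff read its username and password keys. A minimal sketch of such a secret, with placeholder values:

apiVersion: v1
kind: Secret
metadata:
  name: gitea-credential
  namespace: gitea
type: Opaque
stringData:
  username: giteaAdmin           # placeholder
  password: <admin-password>     # placeholder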

@@ -1,25 +1,22 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: storageclass
name: ingress-apps
namespace: argocd
labels:
example: otc
example: ref-implementation
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: default
server: "https://kubernetes.default.svc"
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/otc/storageclass"
path: "stacks/core/ingress-apps"
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1

@@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/use-regex: "true"
name: argo-workflows-ingress
namespace: argo
spec:
ingressClassName: nginx
rules:
- host: localhost
http:
paths:
- backend:
service:
name: argo-server
port:
name: web
path: /argo-workflows(/|$)(.*)
pathType: ImplementationSpecific
- host: {{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: argo-server
port:
name: web
path: /argo-workflows(/|$)(.*)
pathType: ImplementationSpecific

@@ -4,10 +4,11 @@ metadata:
annotations:
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
cert-manager.io/cluster-issuer: main
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/use-regex: "true"
{{{ if eq .Env.CLUSTER_TYPE "osc" }}}
dns.gardener.cloud/class: garden
dns.gardener.cloud/dnsnames: {{{ .Env.DOMAIN_ARGOCD }}}
dns.gardener.cloud/dnsnames: {{{ .Env.DOMAIN }}}
dns.gardener.cloud/ttl: "600"
{{{ end }}}
name: argocd-server
@@ -15,7 +16,7 @@ metadata:
spec:
ingressClassName: nginx
rules:
- host: {{{ .Env.DOMAIN_ARGOCD }}}
- host: {{{ .Env.DOMAIN }}}
http:
paths:
- backend:
@@ -23,9 +24,9 @@ spec:
name: argocd-server
port:
number: 80
path: /
pathType: Prefix
path: /argocd(/|$)(.*)
pathType: ImplementationSpecific
tls:
- hosts:
- {{{ .Env.DOMAIN_ARGOCD }}}
- {{{ .Env.DOMAIN }}}
secretName: argocd-net-tls
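
As a worked example of the regex path and rewrite-target above: a request to https://<domain>/argocd/applications is forwarded to argocd-server as /applications (capture group $2), while /argocd and /argocd/ map to /. This pairs with the server.basehref: /argocd setting added to the Argo CD values earlier in this diff.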

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: backstage
namespace: backstage
spec:
ingressClassName: nginx
rules:
- host: localhost
http:
paths:
- backend:
service:
name: backstage
port:
name: http
path: /
pathType: Prefix
- host: {{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: backstage
port:
name: http
path: /
pathType: Prefix

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: fibonacci-service
namespace: fibonacci-app
spec:
ingressClassName: nginx
rules:
- host: {{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: fibonacci-service
port:
number: 9090
path: /fibonacci
pathType: Prefix

@@ -4,28 +4,27 @@ metadata:
annotations:
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: 512m
cert-manager.io/cluster-issuer: main
{{{ if eq .Env.CLUSTER_TYPE "osc" }}}
dns.gardener.cloud/class: garden
dns.gardener.cloud/dnsnames: {{{ .Env.DOMAIN_GITEA }}}
dns.gardener.cloud/dnsnames: gitea.{{{ .Env.DOMAIN }}}
dns.gardener.cloud/ttl: "600"
{{{ end }}}
name: forgejo-server
name: forgejo
namespace: gitea
spec:
ingressClassName: nginx
rules:
- host: {{{ .Env.DOMAIN_GITEA }}}
- host: gitea.{{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: forgejo-server-http
name: forgejo-http
port:
number: 3000
path: /
pathType: Prefix
tls:
- hosts:
- {{{ .Env.DOMAIN_GITEA }}}
- gitea.{{{ .Env.DOMAIN }}}
secretName: forgejo-net-tls

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: keycloak-ingress-localhost
namespace: keycloak
spec:
ingressClassName: nginx
rules:
- host: localhost
http:
paths:
- backend:
service:
name: keycloak
port:
name: http
path: /keycloak
pathType: ImplementationSpecific
- host: {{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: keycloak
port:
name: http
path: /keycloak
pathType: ImplementationSpecific

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kube-prometheus-stack-grafana
namespace: monitoring
spec:
ingressClassName: nginx
rules:
- host: {{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: kube-prometheus-stack-grafana
port:
number: 80
path: /grafana
pathType: Prefix

@@ -0,0 +1,24 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio-console
namespace: minio-backup
{{{ if eq .Env.CLUSTER_TYPE "osc" }}}
annotations:
dns.gardener.cloud/class: garden
dns.gardener.cloud/dnsnames: minio-backup.{{{ .Env.DOMAIN }}}
dns.gardener.cloud/ttl: "600"
{{{ end }}}
spec:
ingressClassName: nginx
rules:
- host: minio-backup.{{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: minio-console
port:
number: 9001
path: /
pathType: Prefix

@@ -0,0 +1,24 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: openbao
namespace: openbao
{{{ if eq .Env.CLUSTER_TYPE "osc" }}}
annotations:
dns.gardener.cloud/class: garden
dns.gardener.cloud/dnsnames: openbao.{{{ .Env.DOMAIN }}}
dns.gardener.cloud/ttl: "600"
{{{ end }}}
spec:
ingressClassName: nginx
rules:
- host: openbao.{{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: openbao
port:
number: 8200
path: /
pathType: Prefix

@@ -12,18 +12,16 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: ingress-nginx
sources:
- repoURL: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/ingress-nginx-helm.git
- repoURL: https://github.com/kubernetes/ingress-nginx
path: charts/ingress-nginx
targetRevision: helm-chart-4.12.1-depends
targetRevision: helm-chart-4.11.3
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/otc/ingress-nginx/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
- $values/stacks/core/ingress-nginx/values.yaml
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values

@@ -0,0 +1,816 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: "true"
proxy-buffer-size: "32k"
use-forwarded-headers: "true"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
# Omit Ingress status permissions if `--update-status` is disabled.
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-metrics.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-metrics
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: metrics
port: 10254
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: NodePort
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv4
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
replicas: 1
revisionHistoryLimit: 10
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 0
template:
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: registry.k8s.io/ingress-nginx/controller:v1.11.3@sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
- --enable-ssl-passthrough
- --publish-status-address=localhost
securityContext:
runAsNonRoot: true
runAsUser: 101
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
readOnlyRootFilesystem: false
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
hostPort: 80
- name: https
containerPort: 443
protocol: TCP
hostPort: 443
- name: metrics
containerPort: 10254
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
ingress-ready: "true"
kubernetes.io/os: linux
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 0
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/controller-poddisruptionbudget.yaml
# PDB is not supported for DaemonSets.
# https://github.com/kubernetes/kubernetes/issues/108124
---
# Source: ingress-nginx/templates/controller-servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
release: ingress-nginx
spec:
endpoints:
- port: metrics
interval: 30s
namespaceSelector:
matchNames:
- ingress-nginx
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-nginx-controller-admission
namespace: ingress-nginx
port: 443
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: ingress-nginx
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: ingress-nginx
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.11.3
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.11.3"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux

@@ -0,0 +1,49 @@
controller:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
ingressClassResource:
name: nginx
# added for idpbuilder
allowSnippetAnnotations: true
# added for idpbuilder
config:
proxy-buffer-size: 32k
use-forwarded-headers: "true"
# monitoring nginx
metrics:
enabled: true
serviceMonitor:
additionalLabels:
release: "ingress-nginx"
enabled: true
{{{ if eq .Env.CLUSTER_TYPE "kind" }}}
hostPort:
enabled: true
terminationGracePeriodSeconds: 0
service:
type: NodePort
nodeSelector:
ingress-ready: "true"
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
effect: "NoSchedule"
publishService:
enabled: false
extraArgs:
publish-status-address: localhost
# added for idpbuilder
enable-ssl-passthrough: ""
{{{ end }}}

@@ -1,104 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: forgejo-runner
name: forgejo-runner
namespace: gitea
spec:
# Two replicas means that if one is busy, the other can pick up jobs.
replicas: 1
selector:
matchLabels:
app: forgejo-runner
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: forgejo-runner
spec:
restartPolicy: Always
volumes:
- name: docker-certs
emptyDir: {}
- name: runner-data
emptyDir: {}
# Initialise our configuration file using offline registration
# https://forgejo.org/docs/v1.21/admin/actions/#offline-registration
initContainers:
- name: runner-register
image: code.forgejo.org/forgejo/runner:6.3.1
command:
- "sh"
- "-c"
- |
forgejo-runner \
register \
--no-interactive \
--token ${RUNNER_SECRET} \
--name ${RUNNER_NAME} \
--instance ${FORGEJO_INSTANCE_URL} \
--labels docker:docker://node:20-bookworm,ubuntu-22.04:docker://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/catthehackerubuntu:act-22.04,ubuntu-latest:docker://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/catthehackerubuntu:act-22.04
env:
- name: RUNNER_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: RUNNER_SECRET
valueFrom:
secretKeyRef:
name: forgejo-runner-token
key: token
- name: FORGEJO_INSTANCE_URL
value: https://{{{ .Env.DOMAIN_GITEA }}}
volumeMounts:
- name: runner-data
mountPath: /data
containers:
- name: runner
image: code.forgejo.org/forgejo/runner:6.3.1
command:
- "sh"
- "-c"
- |
while ! nc -z 127.0.0.1 2376 </dev/null; do
echo 'waiting for docker daemon...';
sleep 5;
done
forgejo-runner generate-config > config.yml ;
sed -i -e "s|privileged: .*|privileged: true|" config.yml
sed -i -e "s|network: .*|network: host|" config.yml ;
sed -i -e "s|^ envs:$$| envs:\n DOCKER_HOST: tcp://127.0.0.1:2376\n DOCKER_TLS_VERIFY: 1\n DOCKER_CERT_PATH: /certs/client|" config.yml ;
sed -i -e "s|^ options:| options: -v /certs/client:/certs/client|" config.yml ;
sed -i -e "s| valid_volumes: \[\]$$| valid_volumes:\n - /certs/client|" config.yml ;
/bin/forgejo-runner --config config.yml daemon
securityContext:
allowPrivilegeEscalation: true
privileged: true
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
env:
- name: DOCKER_HOST
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
value: "1"
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: runner-data
mountPath: /data
- name: daemon
image: docker:28.0.4-dind
env:
- name: DOCKER_TLS_CERTDIR
value: /certs
securityContext:
privileged: true
volumeMounts:
- name: docker-certs
mountPath: /certs

@@ -1,38 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forgejo-server
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: gitea
sources:
- repoURL: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/forgejo-helm.git
path: .
# first check out the desired version (example v9.0.0): https://code.forgejo.org/forgejo-helm/forgejo-helm/src/tag/v9.0.0/Chart.yaml
# (note that the chart version is not the same as the forgejo application version, which is specified in the above Chart.yaml file)
# then use the devops pipeline and select development, forgejo and the desired version (example v9.0.0):
# https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/devops-pipelines/actions?workflow=update-helm-depends.yaml&actor=0&status=0
# finally update the desired version here and include "-depends", it is created by the devops pipeline.
# why do we have an added "-depends" tag? it resolves rate limitings when downloading helm OCI dependencies
targetRevision: v9.0.0-depends
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/forgejo/forgejo-server/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
ref: values
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/forgejo/forgejo-server/manifests"

@@ -1,175 +0,0 @@
# We use Recreate to make sure only one instance with one version is running, because Forgejo might break or data might become inconsistent.
strategy:
type: Recreate
redis-cluster:
enabled: false
redis:
enabled: false
postgresql:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: true
size: 200Gi
annotations:
everest.io/crypt-key-id: {{{ .Env.PVC_KMS_KEY_ID }}}
test:
enabled: false
deployment:
env:
- name: SSL_CERT_DIR
value: /etc/ssl/forgejo
extraVolumeMounts:
- mountPath: /etc/ssl/forgejo
name: custom-database-certs-volume
readOnly: true
extraVolumes:
- name: custom-database-certs-volume
secret:
secretName: custom-database-certs
gitea:
additionalConfigFromEnvs:
- name: FORGEJO__storage__MINIO_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: access-key
- name: FORGEJO__storage__MINIO_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: secret-key
- name: FORGEJO__queue__CONN_STR
valueFrom:
secretKeyRef:
name: redis-forgejo-cloud-credentials
key: connection-string
- name: FORGEJO__session__PROVIDER_CONFIG
valueFrom:
secretKeyRef:
name: redis-forgejo-cloud-credentials
key: connection-string
- name: FORGEJO__cache__HOST
valueFrom:
secretKeyRef:
name: redis-forgejo-cloud-credentials
key: connection-string
- name: FORGEJO__database__HOST
valueFrom:
secretKeyRef:
name: postgres-forgejo-cloud-credentials
key: host_port
- name: FORGEJO__database__NAME
valueFrom:
secretKeyRef:
name: postgres-forgejo-cloud-credentials
key: database
- name: FORGEJO__database__USER
valueFrom:
secretKeyRef:
name: postgres-forgejo-cloud-credentials
key: username
- name: FORGEJO__database__PASSWD
valueFrom:
secretKeyRef:
name: postgres-forgejo-cloud-credentials
key: password
- name: FORGEJO__indexer__ISSUE_INDEXER_CONN_STR
valueFrom:
secretKeyRef:
name: elasticsearch-cloud-credentials
key: connection-string
- name: FORGEJO__mailer__PASSWD
valueFrom:
secretKeyRef:
name: email-user-credentials
key: connection-string
admin:
existingSecret: gitea-credential
config:
indexer:
ISSUE_INDEXER_ENABLED: true
ISSUE_INDEXER_TYPE: elasticsearch
# TODO next
REPO_INDEXER_ENABLED: false
# REPO_INDEXER_TYPE: meilisearch # not yet working
storage:
MINIO_ENDPOINT: obs.eu-de.otc.t-systems.com:443
STORAGE_TYPE: minio
MINIO_LOCATION: eu-de
MINIO_BUCKET: edp-forgejo-{{{ .Env.CLUSTER_ENVIRONMENT }}}
MINIO_USE_SSL: true
queue:
TYPE: redis
session:
PROVIDER: redis
cache:
ENABLED: true
ADAPTER: redis
service:
DISABLE_REGISTRATION: true
other:
SHOW_FOOTER_VERSION: false
SHOW_FOOTER_TEMPLATE_LOAD_TIME: false
database:
DB_TYPE: postgres
SSL_MODE: verify-ca
server:
DOMAIN: '{{{ .Env.DOMAIN_GITEA }}}'
ROOT_URL: 'https://{{{ .Env.DOMAIN_GITEA }}}:443'
mailer:
ENABLED: true
USER: ipcei-cis-devfw@mms-support.de
PROTOCOL: smtps
FROM: '"IPCEI CIS DevFW" <ipcei-cis-devfw@mms-support.de>'
SMTP_ADDR: mail.mms-support.de
SMTP_PORT: 465
service:
ssh:
type: NodePort
nodePort: 32222
externalTrafficPolicy: Local
image:
pullPolicy: "IfNotPresent"
# Overrides the image tag whose default is the chart appVersion.
#tag: "8.0.3"
# Adds -rootless suffix to image name
rootless: true
forgejo:
runner:
enabled: true
image:
tag: latest
# replicas: 3
config:
runner:
labels:
- docker:docker://node:16-bullseye
- self-hosted:docker://ghcr.io/catthehacker/ubuntu:act-22.04
- ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04
- ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-22.04
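# label format is <runs-on name>:docker://<image>, so e.g. a workflow with "runs-on: ubuntu-latest"
# executes inside ghcr.io/catthehacker/ubuntu:act-22.04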

View file

@ -10,19 +10,19 @@ metadata:
spec:
project: default
sources:
- repoURL: "https://charts.min.io"
- repoURL: 'https://charts.min.io'
targetRevision: 5.0.15
helm:
releaseName: minio
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/local-backup/minio/helm/values.yaml
- $values/stacks/local-backup/minio/helm/values.yaml
chart: minio
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/local-backup/minio/manifests"
path: "stacks/local-backup/minio/manifests"
destination:
server: "https://kubernetes.default.svc"
namespace: minio-backup

View file

@ -10,14 +10,14 @@ metadata:
spec:
project: default
sources:
- repoURL: "https://vmware-tanzu.github.io/helm-charts"
- repoURL: 'https://vmware-tanzu.github.io/helm-charts'
targetRevision: 8.0.0
helm:
releaseName: velero
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/local-backup/velero/helm/values.yaml
- $values/stacks/local-backup/velero/helm/values.yaml
chart: velero
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values
destination:

View file

@ -1,7 +1,7 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mailhog
name: grafana-dashboards
namespace: argocd
labels:
env: dev
@ -10,16 +10,16 @@ metadata:
spec:
project: default
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation/mailhog"
path: "stacks/monitoring/kube-prometheus/dashboards"
destination:
server: "https://kubernetes.default.svc"
namespace: mailhog
namespace: monitoring
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
selfHeal: true
retry:
limit: -1
limit: -1

View file

@ -0,0 +1,30 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-prometheus-stack
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true # do not copy metadata, since its large size can lead to sync failure
destination:
name: in-cluster
namespace: monitoring
sources:
- repoURL: https://github.com/prometheus-community/helm-charts
path: charts/kube-prometheus-stack
targetRevision: HEAD
helm:
valueFiles:
- $values/stacks/monitoring/kube-prometheus/values.yaml
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values

View file

@ -0,0 +1,268 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-dashboard-1
labels:
grafana_dashboard: "1"
data:
k8s-dashboard-01.json: |
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 1,
"links": [
],
"panels": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 0
},
"id": 5,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"expr": "{app=\"crossplane\"}",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: App crossplane",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 8
},
"id": 4,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"expr": "{app=\"argo-server\"}",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: App argo-server",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 16
},
"id": 3,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"expr": "{app=\"forgejo\"}",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: App forgejo",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 24
},
"id": 2,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"expr": "{app=\"backstage\"}",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: App backstage",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 32
},
"id": 1,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"expr": "{app=\"shoot-control-plane\"}",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: App shoot-control-plane",
"type": "logs"
}
],
"preload": false,
"schemaVersion": 40,
"tags": [
],
"templating": {
"list": [
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
},
"timezone": "browser",
"title": "Loki Logs: Apps",
"uid": "ee4iuluru756of",
"version": 2,
"weekStart": ""
}

View file

@ -0,0 +1,845 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-dashboard-2
labels:
grafana_dashboard: "1"
data:
k8s-dashboard-02.json: |
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 30,
"links": [
],
"panels": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 0
},
"id": 19,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"server\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component server",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 8
},
"id": 17,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"repo-server\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component repo-server",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 16
},
"id": 16,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"redis\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component redis",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 24
},
"id": 15,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"query-frontend\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component query-frontend",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 32
},
"id": 14,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"querier\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component querier",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 40
},
"id": 13,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"prometheus-operator-webhook\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component prometheus-operator-webhook",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 48
},
"id": 12,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"prometheus-operator\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component prometheus-operator",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 56
},
"id": 11,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"metrics\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component metrics",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 64
},
"id": 10,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"kube-scheduler\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component kube-scheduler",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 72
},
"id": 9,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"kube-controller-manager\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component kube-controller-manager",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 80
},
"id": 8,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"kube-apiserver\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component kube-apiserver",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 88
},
"id": 7,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"ingester\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component ingester",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 96
},
"id": 6,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"gateway\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component gateway",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 104
},
"id": 5,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"etcd\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component etcd",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 112
},
"id": 4,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"distributor\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component distributor",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 120
},
"id": 3,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"controller\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component controller",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 128
},
"id": 2,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"cloud-infrastructure-controller\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component cloud-infrastructure-controller",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 136
},
"id": 1,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{component=\"applicationset-controller\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Component application-controller",
"type": "logs"
}
],
"preload": false,
"schemaVersion": 40,
"tags": [
],
"templating": {
"list": [
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
},
"timezone": "browser",
"title": "Loki Logs: Components",
"uid": "ae4zuyp1kui9sc",
"version": 2,
"weekStart": ""
}

View file

@ -0,0 +1,537 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-dashboard-3
labels:
grafana_dashboard: "1"
data:
k8s-dashboard-03.json: |
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 31,
"links": [
],
"panels": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 0
},
"id": 11,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"repo-server\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container repo-server",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 8
},
"id": 10,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"promtail\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container promtail",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 16
},
"id": 9,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"prometheus\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container prometheus",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 24
},
"id": 8,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"postgres\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container postgres",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 32
},
"id": 7,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"kube-prometheus-stack\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container kube-prometheus-stack",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 40
},
"id": 6,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"keycloak\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container keycloak",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 48
},
"id": 5,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"grafana\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container grafana",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 56
},
"id": 4,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"forgejo\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container forgejo",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 64
},
"id": 3,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"crossplane\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container crossplane",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 72
},
"id": 2,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"backstage\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container backstage",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"fieldConfig": {
"defaults": {
},
"overrides": [
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 80
},
"id": 1,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"pluginVersion": "11.3.1",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "builder",
"expr": "{container=\"argo-server\"} |= ``",
"queryType": "range",
"refId": "A"
}
],
"title": "Logs: Container argo-server",
"type": "logs"
}
],
"preload": false,
"schemaVersion": 40,
"tags": [
],
"templating": {
"list": [
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
},
"timezone": "browser",
"title": "Loki Logs: Container",
"uid": "ee50bcaehmv40e",
"version": 2,
"weekStart": ""
}

View file

@ -0,0 +1,45 @@
grafana:
namespaceOverride: "monitoring"
admin:
existingSecret: "kube-prometheus-stack-grafana-admin-password"
userKey: admin-user
passwordKey: admin-password
defaultDashboardsTimezone: Europe/Berlin
additionalDataSources:
- name: Loki
type: loki
url: http://loki-loki-distributed-gateway.monitoring:80
# syncPolicy:
# syncOptions:
# - ServerSideApply=true
sidecar:
dashboards:
enabled: true
label: grafana_dashboard
folder: /tmp/dashboards
updateIntervalSeconds: 10
folderAnnotation: grafana_folder
provider:
allowUiUpdates: true
foldersFromFilesStructure: true
grafana.ini:
server:
domain: {{{ .Env.DOMAIN }}}
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
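# the three settings above serve Grafana under https://{{{ .Env.DOMAIN }}}/grafana behind the shared ingress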
serviceMonitor:
# If true, a ServiceMonitor resource is created for the Prometheus Operator (https://github.com/coreos/prometheus-operator)
enabled: true
# monitoring nginx
prometheus:
prometheusSpec:
podMonitorSelectorNilUsesHelmValues: false
serviceMonitorSelectorNilUsesHelmValues: false

View file

@ -0,0 +1,34 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: loki
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: monitoring
sources:
- repoURL: https://github.com/grafana/helm-charts
path: charts/loki-distributed
targetRevision: HEAD
helm:
valueFiles:
- $values/stacks/monitoring/loki/values.yaml
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values
## consider switching to the following chart source once it works again
#- repoURL: https://github.com/grafana/loki
# path: production/helm/loki

View file

@ -0,0 +1,7 @@
loki:
commonConfig:
replication_factor: 1
auth_enabled: false
# storageConfig:
# filesystem: null
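# replication_factor 1 and disabled auth fit a single-tenant dev cluster;
# a production setup would raise the replication factor and enable multi-tenancy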

View file

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: promtail
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: monitoring
sources:
- repoURL: https://github.com/grafana/helm-charts
path: charts/promtail
targetRevision: HEAD
helm:
valueFiles:
- $values/stacks/monitoring/promtail/values.yaml
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values

View file

@ -0,0 +1,45 @@
# -- Overrides the chart's name
nameOverride: null
# -- Overrides the chart's computed fullname
fullnameOverride: null
global:
# -- Allow parent charts to override registry hostname
imageRegistry: ""
# -- Allow parent charts to override registry credentials
imagePullSecrets: []
daemonset:
# -- Deploys Promtail as a DaemonSet
enabled: true
autoscaling:
# -- Creates a VerticalPodAutoscaler for the daemonset
enabled: false
deployment:
# -- Deploys Promtail as a Deployment
enabled: false
config:
enabled: true
logLevel: info
logFormat: logfmt
serverPort: 3101
clients:
- url: http://loki-loki-distributed-gateway/loki/api/v1/push
scrape_configs:
- job_name: authlog
static_configs:
- targets:
- authlog
labels:
job: authlog
__path__: /logs/auth.log
- job_name: syslog
static_configs:
- targets:
- syslog
labels:
job: syslog
__path__: /logs/syslog
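# further jobs follow the same shape, e.g. (hypothetical):
# - job_name: kernlog
#   static_configs:
#     - targets:
#         - kernlog
#       labels:
#         job: kernlog
#         __path__: /logs/kern.log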

View file

@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metrics-server
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: observability
sources:
- chart: metrics-server
repoURL: https://kubernetes-sigs.github.io/metrics-server/
targetRevision: 3.12.2
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability-client/metrics-server/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
ref: values

View file

@ -1,4 +0,0 @@
metrics:
enabled: true
serviceMonitor:
enabled: true

View file

@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vector
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: observability
sources:
- chart: vector
repoURL: https://helm.vector.dev
targetRevision: 0.43.0
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability-client/vector/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
ref: values

View file

@ -1,68 +0,0 @@
# -- Role of the Vector deployment (Agent runs as a node-level daemonset)
role: Agent
dataDir: /vector-data-dir
resources: {}
args:
- -w
- --config-dir
- /etc/vector/
env:
- name: VECTOR_USER
valueFrom:
secretKeyRef:
name: simple-user-secret
key: username
- name: VECTOR_PASSWORD
valueFrom:
secretKeyRef:
name: simple-user-secret
key: password
containerPorts:
- name: prom-exporter
containerPort: 9090
protocol: TCP
service:
enabled: false
customConfig:
data_dir: /vector-data-dir
api:
enabled: false
address: 0.0.0.0:8686
playground: true
sources:
k8s:
type: kubernetes_logs
internal_metrics:
type: internal_metrics
transforms:
parser:
type: remap
inputs: [k8s]
source: |
._msg = parse_json(.message) ?? .message
del(.message)
# Add the cluster environment to the log event
.cluster_environment = "{{{ .Env.CLUSTER_ENVIRONMENT }}}"
sinks:
vlogs:
type: elasticsearch
inputs: [parser]
endpoints:
- https://{{{ .Env.DOMAIN_O12Y }}}/insert/elasticsearch/
auth:
strategy: basic
user: ${VECTOR_USER}
password: ${VECTOR_PASSWORD}
mode: bulk
api_version: v8
compression: gzip
healthcheck:
enabled: false
request:
headers:
AccountID: "0"
ProjectID: "0"
query:
_msg_field: _msg
_time_field: _time
_stream_fields: cluster_environment,kubernetes.container_name,kubernetes.namespace
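# the query params above map the Elasticsearch-compatible bulk insert onto VictoriaLogs'
# message, time and stream fields (matching the /insert/elasticsearch/ endpoint used in the sink)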

View file

@ -1,30 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vm-client
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: observability
sources:
- chart: victoria-metrics-k8s-stack
repoURL: https://victoriametrics.github.io/helm-charts/
targetRevision: 0.48.1
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability-client/vm-client-stack/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
ref: values
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability-client/vm-client-stack/manifests"

View file

@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: simple-user-secret
namespace: observability
type: Opaque
stringData:
username: simple-user
password: simple-password

View file

@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: grafana-operator
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
destination:
name: in-cluster
namespace: observability
sources:
- chart: grafana-operator
repoURL: ghcr.io/grafana/helm-charts
targetRevision: v5.18.0
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability/grafana-operator/manifests"

View file

@ -1,9 +0,0 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
name: argocd
spec:
instanceSelector:
matchLabels:
dashboards: "grafana"
url: "https://raw.githubusercontent.com/argoproj/argo-cd/refs/heads/master/examples/dashboard.json"

View file

@ -1,36 +0,0 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: Grafana
metadata:
name: grafana
labels:
dashboards: "grafana"
spec:
persistentVolumeClaim:
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
ingress:
metadata:
annotations:
cert-manager.io/cluster-issuer: main
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
rules:
- host: grafana.{{{ .Env.DOMAIN }}}
http:
paths:
- backend:
service:
name: grafana-service
port:
number: 3000
path: /
pathType: Prefix
tls:
- hosts:
- grafana.{{{ .Env.DOMAIN }}}
secretName: grafana-net-tls

View file

@ -1,9 +0,0 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
name: ingress-nginx
spec:
instanceSelector:
matchLabels:
dashboards: "grafana"
url: "https://raw.githubusercontent.com/adinhodovic/ingress-nginx-mixin/refs/heads/main/dashboards_out/ingress-nginx-overview.json"

View file

@ -1,9 +0,0 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
name: victoria-logs
spec:
instanceSelector:
matchLabels:
dashboards: "grafana"
url: "https://raw.githubusercontent.com/VictoriaMetrics/VictoriaMetrics/refs/heads/master/dashboards/vm/victorialogs.json"

View file

@ -1,31 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: o12y
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
destination:
name: in-cluster
namespace: observability
sources:
- chart: victoria-metrics-k8s-stack
repoURL: https://victoriametrics.github.io/helm-charts/
targetRevision: 0.48.1
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability/victoria-k8s-stack/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
ref: values
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/observability/victoria-k8s-stack/manifests"

View file

@ -1,24 +0,0 @@
apiVersion: operator.victoriametrics.com/v1beta1
kind: VLogs
metadata:
name: victorialogs
namespace: observability
spec:
retentionPeriod: "12"
removePvcAfterDelete: true
storageMetadata:
annotations:
everest.io/crypt-key-id: {{{ .Env.PVC_KMS_KEY_ID }}}
storage:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
resources:
requests:
memory: 500Mi
cpu: 500m
limits:
memory: 10Gi
cpu: 2

View file

@ -1,15 +0,0 @@
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMUser
metadata:
name: simple-user
namespace: observability
spec:
username: simple-user
password: simple-password
targetRefs:
- static:
url: http://vmsingle-o12y:8429
paths: ["/api/v1/write"]
- static:
url: http://vlogs-victorialogs:9428
paths: ["/insert/elasticsearch/.*"]

File diff suppressed because it is too large

View file

@ -1,14 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: main
spec:
acme:
email: admin@think-ahead.tech
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: cluster-issuer-account-key
solvers:
- http01:
ingress:
ingressClassName: nginx

View file

@ -1,4 +0,0 @@
crds:
enabled: true
replicaCount: 1

View file

@ -1,32 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: cert-manager
sources:
- chart: cert-manager
repoURL: https://charts.jetstack.io
targetRevision: v1.17.2
helm:
valueFiles:
- $values/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/otc/cert-manager/values.yaml
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
ref: values
- repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/otc/cert-manager/manifests"

View file

@ -1,31 +0,0 @@
controller:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
service:
annotations:
kubernetes.io/elb.class: union
kubernetes.io/elb.port: '80'
kubernetes.io/elb.id: {{{ .Env.LOADBALANCER_ID }}}
kubernetes.io/elb.ip: {{{ .Env.LOADBALANCER_IP }}}
ingressClassResource:
name: nginx
# added for idpbuilder
allowSnippetAnnotations: true
# added for idpbuilder
config:
proxy-buffer-size: 32k
use-forwarded-headers: "true"
# monitoring nginx
metrics:
enabled: true
serviceMonitor:
additionalLabels:
release: "ingress-nginx"
enabled: true

View file

@ -1,18 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
name: default
parameters:
kubernetes.io/description: ""
kubernetes.io/hw:passthrough: "true"
kubernetes.io/storagetype: BS
kubernetes.io/volumetype: SATA
kubernetes.io/zone: eu-de-02
provisioner: flexvolume-huawei.com/fuxivol
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true

View file

@ -10,9 +10,9 @@ metadata:
spec:
project: default
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation/argo-workflows/manifests/dev"
path: "stacks/ref-implementation/argo-workflows/manifests/dev"
destination:
server: "https://kubernetes.default.svc"
namespace: argo
@ -23,7 +23,3 @@ spec:
selfHeal: true
retry:
limit: -1
backoff:
duration: 15s
factor: 1
maxDuration: 15s

View file

@ -10,11 +10,11 @@ metadata:
spec:
project: default
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation/backstage-templates/entities"
path: "stacks/ref-implementation/backstage-templates/entities"
directory:
exclude: "catalog-info.yaml"
exclude: 'catalog-info.yaml'
destination:
server: "https://kubernetes.default.svc"
namespace: backstage

View file

@ -20,7 +20,7 @@ metadata:
backstage.io/kubernetes-namespace: default
argocd/app-name: ${{values.name | dump}}
links:
- url: https://{{{ .Env.DOMAIN_GITEA }}}:443
- url: https://gitea.{{{ .Env.DOMAIN }}}:443
title: Repo URL
icon: github
spec:

View file

@ -100,7 +100,7 @@ spec:
input:
description: This is an example app
# Hard-coded value for demo purposes only.
repoUrl: {{{ .Env.DOMAIN_GITEA }}}:443/?repo=${{parameters.name}}
repoUrl: gitea.{{{ .Env.DOMAIN }}}:443/?repo=${{parameters.name}}
defaultBranch: main
- id: create-argocd-app
name: Create ArgoCD App
@ -111,7 +111,7 @@ spec:
argoInstance: in-cluster
projectName: default
# necessary until we generate our own cert
repoUrl: https://{{{ .Env.DOMAIN_GITEA }}}:443/giteaAdmin/${{parameters.name}}
repoUrl: https://gitea.{{{ .Env.DOMAIN }}}:443/giteaAdmin/${{parameters.name}}
path: "kustomize/base"
- id: register
name: Register

View file

@ -14,7 +14,7 @@ metadata:
apache-spark.cnoe.io/label-selector: env=dev,entity-id=${{values.name}}
apache-spark.cnoe.io/cluster-name: local
links:
- url: https://{{{ .Env.DOMAIN_GITEA }}}:443
- url: https://gitea.{{{ .Env.DOMAIN }}}:443
title: Repo URL
icon: github
spec:

View file

@ -35,7 +35,7 @@ spec:
input:
description: This is an example app
# Hard-coded value for demo purposes only.
repoUrl: {{{ .Env.DOMAIN_GITEA }}}:443/?repo=${{parameters.name}}
repoUrl: gitea.{{{ .Env.DOMAIN }}}:443/?repo=${{parameters.name}}
defaultBranch: main
- id: create-argocd-app
name: Create ArgoCD App
@ -46,7 +46,7 @@ spec:
argoInstance: in-cluster
projectName: default
# necessary until we generate our own cert
repoUrl: https://{{{ .Env.DOMAIN_GITEA }}}:443/giteaAdmin/${{parameters.name}}
repoUrl: https://gitea.{{{ .Env.DOMAIN }}}:443/giteaAdmin/${{parameters.name}}
path: "manifests"
- id: register
name: Register

View file

@ -10,7 +10,7 @@ metadata:
backstage.io/kubernetes-namespace: default
argocd/app-name: ${{values.name | dump}}
links:
- url: https://{{{ .Env.DOMAIN_GITEA }}}:443
- url: https://gitea.{{{ .Env.DOMAIN }}}:443
title: Repo URL
icon: github
spec:

View file

@ -31,7 +31,7 @@ spec:
input:
description: This is an example app
# Hard-coded value for demo purposes only.
repoUrl: {{{ .Env.DOMAIN_GITEA }}}:443/?repo=${{parameters.name}}
repoUrl: gitea.{{{ .Env.DOMAIN }}}:443/?repo=${{parameters.name}}
defaultBranch: main
- id: create-argocd-app
name: Create ArgoCD App
@ -42,7 +42,7 @@ spec:
argoInstance: in-cluster
projectName: default
# necessary until we generate our own cert
repoUrl: https://{{{ .Env.DOMAIN_GITEA }}}:443/giteaAdmin/${{parameters.name}}
repoUrl: https://gitea.{{{ .Env.DOMAIN }}}:443/giteaAdmin/${{parameters.name}}
path: "manifests"
- id: register
name: Register

View file

@ -9,7 +9,7 @@ metadata:
backstage.io/kubernetes-label-selector: 'entity-id=${{ values.name }}'
backstage.io/kubernetes-namespace: gitea
links:
- url: https://{{{ .Env.DOMAIN_GITEA }}}:443
- url: https://gitea.{{{ .Env.DOMAIN }}}:443
title: Repo URL
icon: git
spec:
@ -26,7 +26,7 @@ metadata:
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://{{{ .Env.DOMAIN_GITEA }}}:443
- url: https://gitea.{{{ .Env.DOMAIN }}}:443
title: Gitea Repo
icon: git
spec:

View file

@ -33,7 +33,7 @@ spec:
name: Publish to Gitea
action: publish:gitea
input:
repoUrl: {{{ .Env.DOMAIN_GITEA }}}:443/?repo=${{parameters.name}}
repoUrl: gitea.{{{ .Env.DOMAIN }}}:443/?repo=${{parameters.name}}
description: This is the repository for ${{ parameters.name }}
sourcePath: ./skeleton
defaultBranch: main

View file

@ -0,0 +1,32 @@
# This workflow will build a Java project with Gradle, and cache/restore any dependencies to improve the workflow execution time
# For more information see: https://docs.github.com/en/actions/use-cases-and-examples/building-and-testing/building-and-testing-java-with-gradle
name: Java CI with Gradle
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
java: [ '17' ]
steps:
- uses: actions/checkout@v4
- name: Set up JDK {% raw %}${{matrix.java}}{% endraw %}
uses: https://github.com/actions/setup-java@v4
with:
java-version: '{% raw %}${{matrix.java}}{% endraw %}'
distribution: 'adopt'
cache: gradle
- name: Setup Gradle
uses: https://github.com/gradle/actions/setup-gradle@v4
- name: Build with Gradle
run: ./gradlew build

View file

@ -28,12 +28,12 @@ jobs:
distribution: 'adopt'
cache: maven
- name: Build with Maven Wrapper
run: ./mvnw -B -DskipTests verify
run: ./mvnw -B verify
- name: Build image
#run: ./mvnw spring-boot:build-image # the original image build
run: |
export CONTAINER_REPO=$(echo {% raw %}${{ env.GITHUB_REPOSITORY }}{% endraw %} | tr '[:upper:]' '[:lower:]')
./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:build -Djib.allowInsecureRegistries=true -Dimage={{{ .Env.DOMAIN_GITEA }}}/${CONTAINER_REPO}:latest -Djib.to.auth.username={% raw %}${{ secrets.PACKAGES_USER }}{% endraw %} -Djib.to.auth.password={% raw %}${{ secrets.PACKAGES_TOKEN }}{% endraw %} -Djib.from.platforms=linux/arm64,linux/amd64
./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:build -Djib.allowInsecureRegistries=true -Dimage=gitea.{{{ .Env.DOMAIN }}}/${CONTAINER_REPO}:latest -Djib.to.auth.username={% raw %}${{ github.actor }}{% endraw %} -Djib.to.auth.password={% raw %}${{ secrets.PACKAGES_TOKEN }}{% endraw %}
- name: Build image as tar
run: |
./mvnw com.google.cloud.tools:jib-maven-plugin:3.4.4:buildTar -Djib.allowInsecureRegistries=true
@ -57,11 +57,7 @@ jobs:
NODE_TLS_REJECT_UNAUTHORIZED: 0 # This is necessary due to self-signed certs for forgejo; proper setups can skip this
- name: install trivy from deb package
run: |
if [[ "$(uname -m)" == "x86_64" ]]; then
wget -O trivy.deb https://github.com/aquasecurity/trivy/releases/download/v0.58.0/trivy_0.58.0_Linux-64bit.deb
else
wget -O trivy.deb https://github.com/aquasecurity/trivy/releases/download/v0.58.0/trivy_0.58.0_Linux-ARM64.deb
fi
wget -O trivy.deb https://github.com/aquasecurity/trivy/releases/download/v0.58.0/trivy_0.58.0_Linux-64bit.deb
DEBIAN_FRONTEND=noninteractive dpkg -i trivy.deb
- name: scan the image
run: trivy image --input jib-image.tar

View file

@ -9,7 +9,7 @@ metadata:
backstage.io/kubernetes-label-selector: 'entity-id=${{ values.name }}'
backstage.io/kubernetes-namespace: gitea
links:
- url: https://{{{ .Env.DOMAIN_GITEA }}}:443
- url: https://gitea.{{{ .Env.DOMAIN }}}:443
title: Repo URL
icon: git
spec:
@ -26,7 +26,7 @@ metadata:
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://{{{ .Env.DOMAIN_GITEA }}}:443
- url: https://gitea.{{{ .Env.DOMAIN }}}:443
title: Gitea Repo
icon: git
spec:

View file

@ -58,7 +58,7 @@ spec:
spec:
containers:
- name: workload
image: {{{ .Env.DOMAIN_GITEA }}}/giteaadmin/${{ values.name }}
image: gitea.{{{ .Env.DOMAIN }}}/giteaadmin/${{ values.name }}
env:
- name: SPRING_PROFILES_ACTIVE
value: postgres

View file

@ -50,7 +50,7 @@ spec:
name: Publish to Gitea
action: publish:gitea
input:
repoUrl: {{{ .Env.DOMAIN_GITEA }}}:443/?repo=${{parameters.name}}
repoUrl: gitea.{{{ .Env.DOMAIN }}}:443/?repo=${{parameters.name}}
description: This is the repository for ${{ parameters.name }}
sourcePath: ./
defaultBranch: main
@ -64,7 +64,7 @@ spec:
argoInstance: in-cluster
projectName: default
# necessary until we generate our own cert
repoUrl: https://{{{ .Env.DOMAIN_GITEA }}}:443/giteaAdmin/${{parameters.name}}
repoUrl: https://gitea.{{{ .Env.DOMAIN }}}:443/giteaAdmin/${{parameters.name}}
path: "k8s"
- id: register

View file

@ -10,9 +10,9 @@ metadata:
spec:
project: default
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation/backstage/manifests"
path: "stacks/ref-implementation/backstage/manifests"
destination:
server: "https://kubernetes.default.svc"
namespace: backstage
@ -23,7 +23,3 @@ spec:
selfHeal: true
retry:
limit: -1
backoff:
duration: 15s
factor: 1
maxDuration: 15s

View file

@ -106,12 +106,12 @@ data:
integrations:
gitea:
- baseUrl: https://{{{ .Env.DOMAIN_GITEA }}}:443
host: {{{ .Env.DOMAIN_GITEA }}}:443
- baseUrl: https://gitea.{{{ .Env.DOMAIN }}}:443
host: gitea.{{{ .Env.DOMAIN }}}:443
username: ${GITEA_USERNAME}
password: ${GITEA_PASSWORD}
- baseUrl: https://{{{ .Env.DOMAIN_GITEA }}}
host: {{{ .Env.DOMAIN_GITEA }}}
- baseUrl: https://gitea.{{{ .Env.DOMAIN }}}
host: gitea.{{{ .Env.DOMAIN }}}
username: ${GITEA_USERNAME}
password: ${GITEA_PASSWORD}
- baseUrl: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live
@ -167,7 +167,7 @@ data:
locations:
# Examples from a public GitHub repository.
- type: url
target: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}:443/{{{ .Env.CLIENT_REPO_ORG_NAME }}}/raw/branch/main/{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation/backstage-templates/entities/catalog-info.yaml
target: https://gitea.{{{ .Env.DOMAIN }}}:443/giteaAdmin/edfbuilder/raw/branch/main/stacks/ref-implementation/backstage-templates/entities/catalog-info.yaml
rules:
- allow: [Component, System, API, Resource, Location, Template, User, Group]
kubernetes:
@ -255,8 +255,6 @@ spec:
value: debug
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: "0"
- name: NODE_OPTIONS
value: "--no-node-snapshot"
envFrom:
- secretRef:
name: backstage-env-vars
@ -264,8 +262,7 @@ spec:
name: gitea-credentials
- secretRef:
name: argocd-credentials
image: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/devfw-cicd/backstage-edp:1.1.0
imagePullPolicy: Always
image: ghcr.io/cnoe-io/backstage-app:9232d633b2698fffa6d0a73b715e06640d170162
name: backstage
ports:
- containerPort: 7007
@ -389,7 +386,7 @@ spec:
KEYCLOAK_NAME_METADATA: https://{{{ .Env.DOMAIN }}}:443/keycloak/realms/cnoe/.well-known/openid-configuration
KEYCLOAK_CLIENT_SECRET: "{{.BACKSTAGE_CLIENT_SECRET}}"
ARGOCD_AUTH_TOKEN: "argocd.token={{.ARGOCD_SESSION_TOKEN}}"
ARGO_CD_URL: 'https://{{{ .Env.DOMAIN }}}/argocd/api/v1/'
ARGO_CD_URL: 'https://argocd-server.argocd.svc.cluster.local/api/v1/'
data:
- secretKey: ARGOCD_SESSION_TOKEN
remoteRef:

View file

@ -12,9 +12,9 @@ spec:
namespace: external-secrets
server: "https://kubernetes.default.svc"
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation/external-secrets/manifests"
path: "stacks/ref-implementation/external-secrets/manifests"
project: default
syncPolicy:
automated:

View file

@ -1,7 +1,7 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forgejo-runner
name: fibonacci-app
namespace: argocd
labels:
env: dev
@ -9,16 +9,17 @@ metadata:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
source:
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "stacks/ref-implementation/fibonacci-app"
destination:
server: "https://kubernetes.default.svc"
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/forgejo/forgejo-runner"
namespace: fibonacci-app
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
selfHeal: true
retry:
limit: -1

View file

@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fibonacci-deployment
namespace: fibonacci-app
spec:
replicas: 1
selector:
matchLabels:
app: fibonacci-go
template:
metadata:
labels:
app: fibonacci-go
spec:
containers:
- name: fibonacci-go
image: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/christopher.hase/fibonacci_http_go:1.0.0
ports:
- containerPort: 9090

View file

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: fibonacci-service
namespace: fibonacci-app
spec:
selector:
app: fibonacci-go
ports:
- protocol: TCP
port: 9090
targetPort: 9090
type: ClusterIP

View file

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forgejo-runner
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: gitea
sources:
- repoURL: https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/forgejo-runner.git
path: forgejo-runner
targetRevision: HEAD
helm:
valueFiles:
- $values/stacks/ref-implementation/forgejo-runner/values.yaml
- repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
ref: values
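
The Application above uses Argo CD's multi-source pattern: the second source is tagged ref: values, which makes the $values prefix in valueFiles resolve to that repository. A rough local equivalent for debugging, assuming both repositories are cloned side by side and the chart lives in the forgejo-runner subdirectory (paths are illustrative):

    git clone https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/forgejo-runner.git
    # edfbuilder checkout location is an assumption
    helm template forgejo-runner ./forgejo-runner/forgejo-runner \
      -f ./edfbuilder/stacks/ref-implementation/forgejo-runner/values.yaml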

View file

@ -0,0 +1,8 @@
replicaCount: 2
image:
repository: code.forgejo.org/forgejo/runner
pullPolicy: IfNotPresent
tag: "6.0.1"
forgejoUrl: http://forgejo-http.gitea.svc.cluster.local:3000
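
forgejoUrl points the runner at the in-cluster Forgejo HTTP service; a reachability sketch (Forgejo exposes the Gitea-compatible /api/v1/version endpoint):

    kubectl run forgejo-check --rm -it --restart=Never --image=curlimages/curl -- \
      curl -s http://forgejo-http.gitea.svc.cluster.local:3000/api/v1/version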

View file

@ -12,9 +12,9 @@ spec:
namespace: keycloak
server: "https://kubernetes.default.svc"
source:
repoURL: https://{{{ .Env.CLIENT_REPO_DOMAIN }}}/{{{ .Env.CLIENT_REPO_ORG_NAME }}}
repoURL: https://gitea.{{{ .Env.DOMAIN }}}/giteaAdmin/edfbuilder
targetRevision: HEAD
path: "{{{ .Env.CLIENT_REPO_ID }}}/{{{ .Env.DOMAIN }}}/stacks/ref-implementation/keycloak/manifests"
path: "stacks/ref-implementation/keycloak/manifests"
project: default
syncPolicy:
automated:
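
Since syncPolicy.automated is set, the keycloak manifests should converge on their own; a hedged wait, assuming the Application is named keycloak:

    argocd app wait keycloak --health --timeout 300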

View file

@ -71,11 +71,11 @@ data:
},
"type": "default",
"protocol": "openid-connect"
}
}
group-admin-payload.json: |
{"name":"admin"}
{"name":"admin"}
group-base-user-payload.json: |
{"name":"base-user"}
{"name":"base-user"}
group-mapper-payload.json: |
{
"protocol": "openid-connect",
@ -88,41 +88,41 @@ data:
"access.token.claim": "true",
"userinfo.token.claim": "true"
}
}
}
realm-payload.json: |
{"realm":"cnoe","enabled":true}
{"realm":"cnoe","enabled":true}
user-password.json: |
{
"temporary": false,
"type": "password",
"value": "${USER1_PASSWORD}"
}
}
user-user1.json: |
{
"username": "user1",
"email": "user1@user.de",
"email": "",
"firstName": "user",
"lastName": "one",
"requiredActions": [],
"emailVerified": true,
"emailVerified": false,
"groups": [
"/admin"
],
"enabled": true
}
}
user-user2.json: |
{
"username": "user2",
"email": "user2@user.de",
"email": "",
"firstName": "user",
"lastName": "two",
"requiredActions": [],
"emailVerified": true,
"emailVerified": false,
"groups": [
"/base-user"
],
"enabled": true
}
}
argo-client-payload.json: |
{
"protocol": "openid-connect",
@ -150,7 +150,7 @@ data:
"webOrigins": [
"/*"
]
}
}
backstage-client-payload.json: |
{
@ -179,104 +179,8 @@ data:
"webOrigins": [
"/*"
]
}
grafana-client-payload.json: |
{
"clientId": "grafana",
"name": "Grafana Client",
"description": "Used for Grafana SSO",
"rootUrl": "https://{{{ .Env.DOMAIN }}}/grafana",
"adminUrl": "https://{{{ .Env.DOMAIN }}}/grafana",
"baseUrl": "https://{{{ .Env.DOMAIN }}}/grafana",
"alwaysDisplayInConsole": false,
"redirectUris": [
"http://{{{ .Env.DOMAIN }}}/grafana/*"
],
"webOrigins": [
"https://{{{ .Env.DOMAIN }}}/grafana"
],
"standardFlowEnabled": true,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"serviceAccountsEnabled": false,
"publicClient": false,
"frontchannelLogout": true,
"protocol": "openid-connect",
"attributes": {
"saml_idp_initiated_sso_url_name": "",
"oidc.ciba.grant.enabled": "false",
"oauth2.device.authorization.grant.enabled": "false"
},
"defaultClientScopes": [
"web-origins",
"acr",
"offline_access",
"roles",
"profile",
"groups",
"email"
]
}
argocd-client-payload.json: |
{
"protocol": "openid-connect",
"clientId": "argocd",
"name": "ArgoCD Client",
"description": "Used for ArgoCD SSO",
"publicClient": false,
"authorizationServicesEnabled": false,
"serviceAccountsEnabled": false,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"standardFlowEnabled": true,
"frontchannelLogout": true,
"attributes": {
"saml_idp_initiated_sso_url_name": "",
"oauth2.device.authorization.grant.enabled": false,
"oidc.ciba.grant.enabled": false
},
"alwaysDisplayInConsole": false,
"rootUrl": "",
"baseUrl": "",
"redirectUris": [
"https://{{{ .Env.DOMAIN }}}/*"
],
"webOrigins": [
"/*"
]
}
forgejo-client-payload.json: |
{
"protocol": "openid-connect",
"clientId": "forgejo",
"name": "Forgejo Client",
"description": "Used for Forgejo SSO",
"publicClient": false,
"authorizationServicesEnabled": false,
"serviceAccountsEnabled": false,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"standardFlowEnabled": true,
"frontchannelLogout": true,
"attributes": {
"saml_idp_initiated_sso_url_name": "",
"oauth2.device.authorization.grant.enabled": false,
"oidc.ciba.grant.enabled": false
},
"alwaysDisplayInConsole": false,
"rootUrl": "",
"baseUrl": "",
"redirectUris": [
"https://{{{ .Env.DOMAIN_GITEA }}}/*"
],
"webOrigins": [
"/*"
]
}
---
apiVersion: batch/v1
kind: Job
@ -312,7 +216,7 @@ spec:
command: ["/bin/bash", "-c"]
args:
- |
#! /bin/bash
#! /bin/bash
set -ex -o pipefail
@ -343,11 +247,7 @@ spec:
fi
set -e
if [[ "$(uname -m)" == "x86_64" ]]; then
curl -sS -LO "https://dl.k8s.io/release/v1.28.3//bin/linux/amd64/kubectl"
else
curl -sS -LO "https://dl.k8s.io/release/v1.28.3//bin/linux/arm64/kubectl"
fi
curl -sS -LO "https://dl.k8s.io/release/v1.28.3//bin/linux/amd64/kubectl"
chmod +x kubectl
echo "creating cnoe realm and groups"
@ -373,7 +273,7 @@ spec:
${KEYCLOAK_URL}/admin/realms/cnoe/groups
# Create scope mapper
echo 'adding group claim to tokens'
echo 'adding group claim to tokens'
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" \
@ -413,8 +313,8 @@ spec:
echo "creating Argo Workflows client"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/argo-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
-X POST --data @/var/config/argo-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
@ -428,100 +328,29 @@ spec:
-X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
ARGO_WORKFLOWS_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
echo "creating Grafana client"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/grafana-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "grafana") | .id')
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
GRAFANA_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
echo "creating Backstage client"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/backstage-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
${KEYCLOAK_URL}/admin/realms/cnoe/clients
CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "backstage") | .id')
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
BACKSTAGE_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
echo "creating ArgoCD client"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/argocd-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "argocd") | .id')
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
ARGOCD_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
echo "creating Forgejo client"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/forgejo-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "forgejo") | .id')
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
FORGEJO_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
ARGOCD_PASSWORD=$(./kubectl -n argocd get secret argocd-initial-admin-secret -o go-template='{{.data.password | base64decode }}')
ARGOCD_SESSION_TOKEN=$(curl -sS https://{{{ .Env.DOMAIN }}}/argocd/api/v1/session -H 'Content-Type: application/json' -d "{\"username\":\"admin\",\"password\":\"${ARGOCD_PASSWORD}\"}" | jq -r .token)
ARGOCD_SESSION_TOKEN=$(curl -k -sS https://argocd-server.argocd.svc.cluster.local:443/api/v1/session -H 'Content-Type: application/json' -d "{\"username\":\"admin\",\"password\":\"${ARGOCD_PASSWORD}\"}" | jq -r .token)
echo \
"apiVersion: v1
@ -536,12 +365,7 @@ spec:
ARGOCD_SESSION_TOKEN: ${ARGOCD_SESSION_TOKEN}
BACKSTAGE_CLIENT_SECRET: ${BACKSTAGE_CLIENT_SECRET}
BACKSTAGE_CLIENT_ID: backstage
GRAFANA_CLIENT_SECRET: ${GRAFANA_CLIENT_SECRET}
GRAFANA_CLIENT_ID: grafana
ARGOCD_CLIENT_SECRET: ${ARGOCD_CLIENT_SECRET}
ARGOCD_CLIENT_ID: argocd
FORGEJO_CLIENT_SECRET: ${FORGEJO_CLIENT_SECRET}
FORGEJO_CLIENT_ID: forgejo
" > /tmp/secret.yaml
./kubectl apply -f /tmp/secret.yaml
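
After the Job completes, the created realm and clients can be verified with the same admin token the script already holds; a minimal check sketch reusing KEYCLOAK_URL and KEYCLOAK_TOKEN from above:

    curl -sS -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
      "${KEYCLOAK_URL}/admin/realms/cnoe/clients" | jq -r '.[].clientId'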

Some files were not shown because too many files have changed in this diff.