Compare commits

...

2 commits

Author                 SHA1        Message                                                        Date
Richard Robert Reitz   dd034cf862  Fixed Forgejo URL and defined the labels                       2025-02-22 22:21:55 +01:00
Richard Robert Reitz   1813371a88  Added hydrated version of nginx-ingress, argocd and forgejo    2025-02-19 20:40:59 +01:00
4 changed files with 25971 additions and 1 deletion

File diff suppressed because it is too large.


@@ -0,0 +1,619 @@
---
# Source: forgejo/templates/gitea/config.yaml
apiVersion: v1
kind: Secret
metadata:
  name: forgejo-inline-config
  namespace: "gitea"
  labels:
    helm.sh/chart: forgejo-0.0.0
    app: forgejo
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
    app.kubernetes.io/version: "9.0.2"
    version: "9.0.2"
    app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
  _generals_: |-
    APP_NAME=Forgejo: Beyond coding. We forge.
    RUN_MODE=prod
  cache: |-
    ADAPTER=memory
    HOST=
  database: DB_TYPE=sqlite3
  indexer: ISSUE_INDEXER_TYPE=db
  metrics: ENABLED=false
  queue: |-
    CONN_STR=
    TYPE=level
  repository: ROOT=/data/git/gitea-repositories
  security: INSTALL_LOCK=true
  server: |-
    APP_DATA_PATH=/data
    DOMAIN=gitea.runner.c-one-infra.de
    ENABLE_PPROF=false
    HTTP_PORT=3000
    PROTOCOL=http
    ROOT_URL=https://gitea.runner.c-one-infra.de:443
    SSH_DOMAIN=gitea.runner.c-one-infra.de
    SSH_LISTEN_PORT=2222
    SSH_PORT=22
    START_SSH_SERVER=true
  session: |-
    PROVIDER=memory
    PROVIDER_CONFIG=
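# Note (not part of the rendered chart): each key under stringData above is
# mounted as a file into the init container and converted into environment
# variables by the config_environment.sh script in the next Secret, which
# environment-to-ini then writes into app.ini; e.g. HTTP_PORT=3000 from the
# 'server' file ends up as HTTP_PORT in the [server] section of app.ini.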
---
# Source: forgejo/templates/gitea/config.yaml
apiVersion: v1
kind: Secret
metadata:
  name: forgejo
  labels:
    helm.sh/chart: forgejo-0.0.0
    app: forgejo
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
    app.kubernetes.io/version: "9.0.2"
    version: "9.0.2"
    app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
  assertions: |
  config_environment.sh: |-
    #!/usr/bin/env bash
    set -euo pipefail
    function env2ini::log() {
      printf "${1}\n"
    }
    function env2ini::read_config_to_env() {
      local section="${1}"
      local line="${2}"
      if [[ -z "${line}" ]]; then
        # skip empty line
        return
      fi
      # 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
      local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
      if [[ -z "${setting}" ]]; then
        env2ini::log ' ! invalid setting'
        exit 1
      fi
      local value=''
      local regex="^${setting}(\s*)=(\s*)(.*)"
      if [[ $line =~ $regex ]]; then
        value="${BASH_REMATCH[3]}"
      else
        env2ini::log ' ! invalid setting'
        exit 1
      fi
      env2ini::log " + '${setting}'"
      if [[ -z "${section}" ]]; then
        export "FORGEJO____${setting^^}=${value}" # '^^' makes the variable content uppercase
        return
      fi
      local masked_section="${section//./_0X2E_}" # '//' instructs to replace all matches
      masked_section="${masked_section//-/_0X2D_}"
      export "FORGEJO__${masked_section^^}__${setting^^}=${value}" # '^^' makes the variable content uppercase
    }
    function env2ini::reload_preset_envs() {
      env2ini::log "Reloading preset envs..."
      while read -r line; do
        if [[ -z "${line}" ]]; then
          # skip empty line
          return
        fi
        # 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
        local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
        if [[ -z "${setting}" ]]; then
          env2ini::log ' ! invalid setting'
          exit 1
        fi
        local value=''
        local regex="^${setting}(\s*)=(\s*)(.*)"
        if [[ $line =~ $regex ]]; then
          value="${BASH_REMATCH[3]}"
        else
          env2ini::log ' ! invalid setting'
          exit 1
        fi
        env2ini::log " + '${setting}'"
        export "${setting^^}=${value}" # '^^' makes the variable content uppercase
      done < "/tmp/existing-envs"
      rm /tmp/existing-envs
    }
    function env2ini::process_config_file() {
      local config_file="${1}"
      local section="$(basename "${config_file}")"
      if [[ $section == '_generals_' ]]; then
        env2ini::log " [ini root]"
        section=''
      else
        env2ini::log " ${section}"
      fi
      while read -r line; do
        env2ini::read_config_to_env "${section}" "${line}"
      done < <(awk 1 "${config_file}") # Helm .toYaml trims the trailing new line which breaks line processing; awk 1 ... adds it back while reading
    }
    function env2ini::load_config_sources() {
      local path="${1}"
      if [[ -d "${path}" ]]; then
        env2ini::log "Processing $(basename "${path}")..."
        while read -d '' configFile; do
          env2ini::process_config_file "${configFile}"
        done < <(find "${path}" -type l -not -name '..data' -print0)
        env2ini::log "\n"
      fi
    }
    function env2ini::generate_initial_secrets() {
      # These environment variables will either be
      # - overwritten with user defined values,
      # - initially used to set up Forgejo
      # Anyway, they won't harm existing app.ini files
      export FORGEJO__SECURITY__INTERNAL_TOKEN=$(gitea generate secret INTERNAL_TOKEN)
      export FORGEJO__SECURITY__SECRET_KEY=$(gitea generate secret SECRET_KEY)
      export FORGEJO__OAUTH2__JWT_SECRET=$(gitea generate secret JWT_SECRET)
      export FORGEJO__SERVER__LFS_JWT_SECRET=$(gitea generate secret LFS_JWT_SECRET)
      env2ini::log "...Initial secrets generated\n"
    }
    # save existing envs prior to script execution. Necessary to keep order of
    # preexisting and custom envs
    env | (grep -e '^FORGEJO__' || [[ $? == 1 ]]) > /tmp/existing-envs
    # MUST BE CALLED BEFORE OTHER CONFIGURATION
    env2ini::generate_initial_secrets
    env2ini::load_config_sources '/env-to-ini-mounts/inlines/'
    env2ini::load_config_sources '/env-to-ini-mounts/additionals/'
    # load existing envs to override auto generated envs
    env2ini::reload_preset_envs
    env2ini::log "=== All configuration sources loaded ===\n"
    # safety to prevent rewrite of secret keys if an app.ini already exists
    if [ -f ${GITEA_APP_INI} ]; then
      env2ini::log 'An app.ini file already exists. To prevent overwriting secret keys, these settings are dropped and remain unchanged:'
      env2ini::log ' - security.INTERNAL_TOKEN'
      env2ini::log ' - security.SECRET_KEY'
      env2ini::log ' - oauth2.JWT_SECRET'
      env2ini::log ' - server.LFS_JWT_SECRET'
      unset FORGEJO__SECURITY__INTERNAL_TOKEN
      unset FORGEJO__SECURITY__SECRET_KEY
      unset FORGEJO__OAUTH2__JWT_SECRET
      unset FORGEJO__SERVER__LFS_JWT_SECRET
    fi
    environment-to-ini -o $GITEA_APP_INI
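# Illustration of the env-to-ini naming scheme the script above implements
# (example values only): a mounted file <section> containing KEY=VALUE is
# exported as FORGEJO__<SECTION>__<KEY>, with '.' and '-' masked first:
#
#   server/HTTP_PORT=3000     ->  FORGEJO__SERVER__HTTP_PORT=3000
#   _generals_/RUN_MODE=prod  ->  FORGEJO____RUN_MODE=prod
#   a hypothetical section 'repository.local' with KEY=VALUE
#                             ->  FORGEJO__REPOSITORY_0X2E_LOCAL__KEY=VALUE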
---
# Source: forgejo/templates/gitea/init.yaml
apiVersion: v1
kind: Secret
metadata:
  name: forgejo-init
  namespace: "gitea"
  labels:
    helm.sh/chart: forgejo-0.0.0
    app: forgejo
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
    app.kubernetes.io/version: "9.0.2"
    version: "9.0.2"
    app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
  configure_gpg_environment.sh: |-
    #!/usr/bin/env bash
    set -eu
    gpg --batch --import /raw/private.asc
  init_directory_structure.sh: |-
    #!/usr/bin/env bash
    set -euo pipefail
    set -x
    mkdir -p /data/git/.ssh
    chmod -R 700 /data/git/.ssh
    [ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
    # prepare temp directory structure
    mkdir -p "${GITEA_TEMP}"
    chmod ug+rwx "${GITEA_TEMP}"
  configure_gitea.sh: |-
    #!/usr/bin/env bash
    set -euo pipefail
    echo '==== BEGIN GITEA CONFIGURATION ===='
    { # try
      gitea migrate
    } || { # catch
      echo "Forgejo migrate might fail due to database connection...This init-container will try again in a few seconds"
      exit 1
    }
    function configure_admin_user() {
      local full_admin_list=$(gitea admin user list --admin)
      local actual_user_table=''
      # We might have distorted output due to warning logs, so we have to detect the actual user table by its headline and trim output above that line
      local regex="(.*)(ID\s+Username\s+Email\s+IsActive.*)"
      if [[ "${full_admin_list}" =~ $regex ]]; then
        actual_user_table=$(echo "${BASH_REMATCH[2]}" | tail -n+2) # tail'ing to drop the table headline
      else
        # This code block should never be reached, as long as the output table header remains the same.
        # If this code block is reached, the regex doesn't match anymore and we probably have to adjust this script.
        echo "ERROR: 'configure_admin_user' was not able to determine the current list of admin users."
        echo " Please review the output of 'gitea admin user list --admin' shown below."
        echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
        echo "DEBUG: Output of 'gitea admin user list --admin'"
        echo "--"
        echo "${full_admin_list}"
        echo "--"
        exit 1
      fi
      local ACCOUNT_ID=$(echo "${actual_user_table}" | grep -E "\s+${GITEA_ADMIN_USERNAME}\s+" | awk -F " " "{printf \$1}")
      if [[ -z "${ACCOUNT_ID}" ]]; then
        local -a create_args
        create_args=(--admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email "gitea@local.domain")
        if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = initialOnlyRequireReset ]]; then
          create_args+=(--must-change-password=true)
        else
          create_args+=(--must-change-password=false)
        fi
        echo "No admin user '${GITEA_ADMIN_USERNAME}' found. Creating now..."
        gitea admin user create "${create_args[@]}"
        echo '...created.'
      else
        if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = keepUpdated ]]; then
          echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist. Running update to sync password..."
          local -a change_args
          change_args=(--username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --must-change-password=false)
          gitea admin user change-password "${change_args[@]}"
          echo '...password sync done.'
        else
          echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist, but update mode is set to '${GITEA_ADMIN_PASSWORD_MODE}'. Skipping."
        fi
      fi
    }
    configure_admin_user
    function configure_ldap() {
      echo 'no ldap configuration... skipping.'
    }
    configure_ldap
    function configure_oauth() {
      echo 'no oauth configuration... skipping.'
    }
    configure_oauth
    echo '==== END GITEA CONFIGURATION ===='
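# Note on configure_gitea.sh above: GITEA_ADMIN_PASSWORD_MODE controls the
# admin bootstrap. 'keepUpdated' (the mode set in the Deployment below)
# re-syncs the password from the gitea-credential Secret on every start;
# 'initialOnlyRequireReset' sets it only at creation and forces a change on
# first login; any other value leaves an existing admin account untouched.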
---
# Source: forgejo/templates/gitea/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: gitea-shared-storage
  namespace: "gitea"
  annotations:
    helm.sh/resource-policy: keep
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
---
# Source: forgejo/templates/gitea/http-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: forgejo-http
  namespace: "gitea"
  labels:
    helm.sh/chart: forgejo-0.0.0
    app: forgejo
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
    app.kubernetes.io/version: "9.0.2"
    version: "9.0.2"
    app.kubernetes.io/managed-by: Helm
  annotations:
    {}
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: http
      port: 3000
      targetPort: 3000
  selector:
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
---
# Source: forgejo/templates/gitea/ssh-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: forgejo-ssh
  namespace: "gitea"
  labels:
    helm.sh/chart: forgejo-0.0.0
    app: forgejo
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
    app.kubernetes.io/version: "9.0.2"
    version: "9.0.2"
    app.kubernetes.io/managed-by: Helm
  annotations:
    {}
spec:
  type: NodePort
  externalTrafficPolicy: Local
  ports:
    - name: ssh
      port: 22
      targetPort: 2222
      protocol: TCP
      nodePort: 32222
  selector:
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
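# Illustration (NODE_ADDRESS is a placeholder for a reachable node): with
# nodePort 32222 forwarding to the container's SSH_LISTEN_PORT 2222, a
# repository could be cloned via:
#
#   git clone ssh://git@NODE_ADDRESS:32222/some-org/some-repo.git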
---
# Source: forgejo/templates/gitea/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: forgejo
  namespace: "gitea"
  annotations:
  labels:
    helm.sh/chart: forgejo-0.0.0
    app: forgejo
    app.kubernetes.io/name: forgejo
    app.kubernetes.io/instance: forgejo
    app.kubernetes.io/version: "9.0.2"
    version: "9.0.2"
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 100%
  selector:
    matchLabels:
      app.kubernetes.io/name: forgejo
      app.kubernetes.io/instance: forgejo
  template:
    metadata:
      annotations:
        checksum/config: bd40d1abc2e2692fafe0821e9345e231261f194a19fbee5aef7dd9cdfc106596
      labels:
        helm.sh/chart: forgejo-0.0.0
        app: forgejo
        app.kubernetes.io/name: forgejo
        app.kubernetes.io/instance: forgejo
        app.kubernetes.io/version: "9.0.2"
        version: "9.0.2"
        app.kubernetes.io/managed-by: Helm
    spec:
      securityContext:
        fsGroup: 1000
      initContainers:
        - name: init-directories
          image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
          imagePullPolicy: IfNotPresent
          command: ["/usr/sbin/init_directory_structure.sh"]
          env:
            - name: GITEA_APP_INI
              value: /data/gitea/conf/app.ini
            - name: GITEA_CUSTOM
              value: /data/gitea
            - name: GITEA_WORK_DIR
              value: /data
            - name: GITEA_TEMP
              value: /tmp/gitea
          volumeMounts:
            - name: init
              mountPath: /usr/sbin
            - name: temp
              mountPath: /tmp
            - name: data
              mountPath: /data
          securityContext:
            {}
          resources:
            limits: {}
            requests:
              cpu: 100m
              memory: 128Mi
        - name: init-app-ini
          image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
          imagePullPolicy: IfNotPresent
          command: ["/usr/sbin/config_environment.sh"]
          env:
            - name: GITEA_APP_INI
              value: /data/gitea/conf/app.ini
            - name: GITEA_CUSTOM
              value: /data/gitea
            - name: GITEA_WORK_DIR
              value: /data
            - name: GITEA_TEMP
              value: /tmp/gitea
          volumeMounts:
            - name: config
              mountPath: /usr/sbin
            - name: temp
              mountPath: /tmp
            - name: data
              mountPath: /data
            - name: inline-config-sources
              mountPath: /env-to-ini-mounts/inlines/
          securityContext:
            {}
          resources:
            limits: {}
            requests:
              cpu: 100m
              memory: 128Mi
        - name: configure-gitea
          image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
          command: ["/usr/sbin/configure_gitea.sh"]
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 1000
          env:
            - name: GITEA_APP_INI
              value: /data/gitea/conf/app.ini
            - name: GITEA_CUSTOM
              value: /data/gitea
            - name: GITEA_WORK_DIR
              value: /data
            - name: GITEA_TEMP
              value: /tmp/gitea
            - name: HOME
              value: /data/gitea/git
            - name: GITEA_ADMIN_USERNAME
              valueFrom:
                secretKeyRef:
                  key: username
                  name: gitea-credential
            - name: GITEA_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: gitea-credential
            - name: GITEA_ADMIN_PASSWORD_MODE
              value: keepUpdated
          volumeMounts:
            - name: init
              mountPath: /usr/sbin
            - name: temp
              mountPath: /tmp
            - name: data
              mountPath: /data
          resources:
            limits: {}
            requests:
              cpu: 100m
              memory: 128Mi
      terminationGracePeriodSeconds: 60
      containers:
        - name: forgejo
          image: "code.forgejo.org/forgejo/forgejo:9.0.2-rootless"
          imagePullPolicy: IfNotPresent
          env:
            # SSH Port values have to be set here as well for openssh configuration
            - name: SSH_LISTEN_PORT
              value: "2222"
            - name: SSH_PORT
              value: "22"
            - name: GITEA_APP_INI
              value: /data/gitea/conf/app.ini
            - name: GITEA_CUSTOM
              value: /data/gitea
            - name: GITEA_WORK_DIR
              value: /data
            - name: GITEA_TEMP
              value: /tmp/gitea
            - name: TMPDIR
              value: /tmp/gitea
            - name: HOME
              value: /data/gitea/git
          ports:
            - name: ssh
              containerPort: 2222
            - name: http
              containerPort: 3000
          livenessProbe:
            failureThreshold: 10
            initialDelaySeconds: 200
            periodSeconds: 10
            successThreshold: 1
            tcpSocket:
              port: http
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            tcpSocket:
              port: http
            timeoutSeconds: 1
          resources:
            {}
          securityContext:
            {}
          volumeMounts:
            - name: temp
              mountPath: /tmp
            - name: data
              mountPath: /data
      volumes:
        - name: init
          secret:
            secretName: forgejo-init
            defaultMode: 110
        - name: config
          secret:
            secretName: forgejo
            defaultMode: 110
        - name: inline-config-sources
          secret:
            secretName: forgejo-inline-config
        - name: temp
          emptyDir: {}
        - name: data
          persistentVolumeClaim:
            claimName: gitea-shared-storage
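A minimal smoke test for the Forgejo manifests above (illustrative only;
assumes kubectl is pointed at the target cluster):

  kubectl -n gitea get pods,svc,pvc
  kubectl -n gitea logs deploy/forgejo -c configure-gitea
  kubectl -n gitea port-forward svc/forgejo-http 3000:3000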


@@ -0,0 +1,816 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
  proxy-buffer-size: "32k"
  use-forwarded-headers: "true"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  # Omit Ingress status permissions if `--update-status` is disabled.
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    resourceNames:
      - ingress-nginx-leader
    verbs:
      - get
      - update
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-metrics.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller-metrics
  namespace: ingress-nginx
spec:
  type: ClusterIP
  ports:
    - name: metrics
      port: 10254
      protocol: TCP
      targetPort: metrics
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  type: ClusterIP
  ports:
    - name: https-webhook
      port: 443
      targetPort: webhook
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: NodePort
  ipFamilyPolicy: SingleStack
  ipFamilies:
    - IPv4
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
      appProtocol: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
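# Note: the controller Service is NodePort without pinned nodePort values; in
# this setup external traffic can also bypass it, because the Deployment below
# binds hostPort 80/443 on nodes labeled ingress-ready=true.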
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  replicas: 1
  revisionHistoryLimit: 10
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  minReadySeconds: 0
  template:
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-4.11.3
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.11.3"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: registry.k8s.io/ingress-nginx/controller:v1.11.3@sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --election-id=ingress-nginx-leader
            - --controller-class=k8s.io/ingress-nginx
            - --ingress-class=nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
            - --validating-webhook=:8443
            - --validating-webhook-certificate=/usr/local/certificates/cert
            - --validating-webhook-key=/usr/local/certificates/key
            - --enable-ssl-passthrough
            - --publish-status-address=localhost
          securityContext:
            runAsNonRoot: true
            runAsUser: 101
            allowPrivilegeEscalation: false
            seccompProfile:
              type: RuntimeDefault
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            readOnlyRootFilesystem: false
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
              hostPort: 80
            - name: https
              containerPort: 443
              protocol: TCP
              hostPort: 443
            - name: metrics
              containerPort: 10254
              protocol: TCP
            - name: webhook
              containerPort: 8443
              protocol: TCP
          volumeMounts:
            - name: webhook-cert
              mountPath: /usr/local/certificates/
              readOnly: true
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      nodeSelector:
        ingress-ready: "true"
        kubernetes.io/os: linux
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
          operator: Equal
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
          operator: Equal
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 0
      volumes:
        - name: webhook-cert
          secret:
            secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
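# Illustration (hypothetical resource, not part of the chart output): an
# Ingress opts into this controller via the class name defined above:
#
#   apiVersion: networking.k8s.io/v1
#   kind: Ingress
#   metadata:
#     name: example
#   spec:
#     ingressClassName: nginx
#     rules:
#       - host: example.runner.c-one-infra.de
#         http:
#           paths:
#             - path: /
#               pathType: Prefix
#               backend:
#                 service:
#                   name: example
#                   port:
#                     number: 80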
---
# Source: ingress-nginx/templates/controller-poddisruptionbudget.yaml
# PDB is not supported for DaemonSets.
# https://github.com/kubernetes/kubernetes/issues/108124
---
# Source: ingress-nginx/templates/controller-servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
    release: ingress-nginx
spec:
  endpoints:
    - port: metrics
      interval: 30s
  namespaceSelector:
    matchNames:
      - ingress-nginx
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
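# Note: this ServiceMonitor requires the Prometheus Operator CRDs
# (monitoring.coreos.com/v1); without them the document cannot be applied.
# It scrapes the controller's 'metrics' port (10254) every 30s.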
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  annotations:
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  name: ingress-nginx-admission
webhooks:
  - name: validate.nginx.ingress.kubernetes.io
    matchPolicy: Equivalent
    rules:
      - apiGroups:
          - networking.k8s.io
        apiVersions:
          - v1
        operations:
          - CREATE
          - UPDATE
        resources:
          - ingresses
    failurePolicy: Fail
    sideEffects: None
    admissionReviewVersions:
      - v1
    clientConfig:
      service:
        name: ingress-nginx-controller-admission
        namespace: ingress-nginx
        port: 443
        path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ingress-nginx-admission
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
rules:
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - validatingwebhookconfigurations
    verbs:
      - get
      - update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ingress-nginx-admission
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-4.11.3
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.11.3"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: create
          image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
          imagePullPolicy: IfNotPresent
          args:
            - create
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
            - --namespace=$(POD_NAMESPACE)
            - --secret-name=ingress-nginx-admission
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65532
            seccompProfile:
              type: RuntimeDefault
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
  annotations:
    "helm.sh/hook": post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.11.3
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.11.3"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-4.11.3
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.11.3"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4@sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=$(POD_NAMESPACE)
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            - --patch-failure-policy=Fail
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 65532
            seccompProfile:
              type: RuntimeDefault
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
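A quick check for the controller above (illustrative only; NODE_ADDRESS is a
placeholder for a node labeled ingress-ready=true, and the Host header assumes
an Ingress for that host exists):

  kubectl -n ingress-nginx wait --for=condition=Ready pod \
    -l app.kubernetes.io/component=controller
  curl -H 'Host: gitea.runner.c-one-infra.de' http://NODE_ADDRESS/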


@@ -5,4 +5,9 @@ image:
   pullPolicy: IfNotPresent
   tag: "6.0.1"
-forgejoUrl: http://forgejo-http.gitea.svc.cluster.local:3000
+forgejoUrl: https://gitea.{{{ .Env.DOMAIN }}}
+runnerLabels:
+  - docker:docker://node:20-bullseye
+  - self-hosted:docker://ghcr.io/catthehacker/ubuntu:act-22.04
+  - ubuntu-latest:docker://node:20-bullseye
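For illustration (hypothetical workflow, not part of this change): a Forgejo
Actions job selects one of the runner labels defined above via runs-on, and
the runner executes it in the mapped container image:

  # .forgejo/workflows/build.yaml (hypothetical)
  on: [push]
  jobs:
    build:
      runs-on: docker   # mapped to docker://node:20-bullseye above
      steps:
        - run: node --version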