Compare commits


No commits in common. "main" and "v0.3.0" have entirely different histories.
main ... v0.3.0

97 changed files with 4729 additions and 5925 deletions

.drone.yml (new file, 96 lines)

@ -0,0 +1,96 @@
---
kind: pipeline
type: docker
name: lint
platform:
os: linux
arch: arm64
steps:
- name: helm lint
pull: always
image: alpine:3.17
commands:
- apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing helm
- helm lint
- name: helm template
pull: always
image: alpine:3.17
commands:
- apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing helm
- helm dependency update
- helm template --debug gitea-helm .
- name: helm unittests
pull: always
image: alpine:3.17
commands:
- apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing make helm git bash
- helm plugin install https://github.com/heyhabito/helm-unittest
- helm dependency update
- make unittests
- name: verify readme
pull: always
image: alpine:3.17
commands:
- apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing make npm git
- make readme
- git diff --exit-code --name-only README.md
- name: discord
pull: always
image: appleboy/drone-discord:1.2.4
environment:
DISCORD_WEBHOOK_ID:
from_secret: discord_webhook_id
DISCORD_WEBHOOK_TOKEN:
from_secret: discord_webhook_token
when:
status:
- changed
- failure
---
kind: pipeline
type: docker
name: release-version
platform:
os: linux
arch: arm64
trigger:
event:
- tag
steps:
- name: generate-chart
pull: always
image: alpine:3.17
commands:
- apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/testing helm
- apk add --no-cache curl
- helm dependency update
- helm package --version "${DRONE_TAG##v}" ./
- mkdir gitea
- mv gitea*.tgz gitea/
- curl -L -o gitea/index.yaml https://dl.gitea.io/charts/index.yaml
- helm repo index gitea/ --url https://dl.gitea.io/charts --merge gitea/index.yaml
- name: upload-chart
pull: always
image: plugins/s3:latest
settings:
bucket: gitea-artifacts
endpoint: https://ams3.digitaloceanspaces.com
access_key:
from_secret: aws_access_key_id
secret_key:
from_secret: aws_secret_access_key
source: gitea/*
target: /charts
strip_prefix: gitea/


@ -1,12 +0,0 @@
# EditorConfig is awesome: https://EditorConfig.org
# top-most EditorConfig file
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = false
insert_final_newline = true


@ -1,26 +0,0 @@
# action.yml
name: setup-docker
description: 'setup docker'
runs:
using: 'composite'
steps:
- shell: bash
name: create cache
run: |
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update -qq
apt-get -q install -qq \
containerd.io \
docker-ce \
docker-ce-cli \
;
- shell: bash
run: docker info


@ -1,25 +0,0 @@
# action.yml
name: setup-k3s
description: 'setup k3s'
inputs:
version:
description: 'k3s version'
required: true
runs:
using: 'composite'
steps:
- shell: bash
name: install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${INPUT_VERSION} K3S_KUBECONFIG_MODE=640 sh -s - server
echo "KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> $GITHUB_ENV
- shell: bash
name: check k3s
run: kubectl cluster-info
- shell: bash
name: wait for nodes ready
run: |
sleep 3
kubectl wait --for=condition=Ready nodes --all --timeout=600s


@ -1,19 +0,0 @@
# action.yml
name: setup-node
description: 'setup node'
runs:
using: 'composite'
steps:
- name: Setup pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
with:
standalone: true
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version-file: .node-version
cache: 'pnpm'
- shell: bash
run: pnpm install --frozen-lockfile


@ -1,27 +0,0 @@
# action.yml
name: setup
description: 'setup system'
runs:
using: 'composite'
steps:
- shell: bash
name: create cache
run: |
mkdir -p /opt/hostedtoolcache
mkdir -p /srv/forgejo-renovate/.cache/act/tool_cache
- shell: bash
name: install deps
run: |
apt-get update -qq
apt-get -q install -qq \
ca-certificates \
curl \
gnupg \
make \
python3 \
python3-wheel \
python3-venv \
unzip \
wget \
;


@ -1,57 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"packageRules": [
{
"description": "Separate minor and patch updates for k3s",
"matchDatasources": ["github-releases"],
"matchPackageNames": ["k3s-io/k3s"],
"separateMultipleMinor": true,
"separateMinorPatch": true,
"branchTopic": "{{{depNameSanitized}}}{{#if isMinor}}-minor{{/if}}-{{{newMajor}}}{{#if isPatch}}.{{{newMinor}}}{{/if}}.x{{#if isLockfileUpdate}}-lockfile{{/if}}",
"commitMessageSuffix": "{{#if isMinor}}(minor){{/if}}{{#if isPatch}}(patch){{/if}}"
},
{
"description": "No automerge for k3s major and minor updates",
"matchDatasources": ["github-releases"],
"matchPackageNames": ["k3s-io/k3s"],
"matchUpdateTypes": ["major", "minor"],
"automerge": false
},
{
"description": "Group k3s patch updates",
"matchDatasources": ["github-releases"],
"matchPackageNames": ["k3s-io/k3s"],
"matchUpdateTypes": ["patch"],
"groupName": "k3s"
},
{
"description": "Disable k3s major and minor updates for old versions",
"matchDatasources": ["github-releases"],
"matchFileNames": [".forgejo/workflows/**"],
"matchPackageNames": ["k3s-io/k3s"],
"matchUpdateTypes": ["major", "minor"],
"matchCurrentValue": "!/^v1.32/",
"enabled": false
}
],
"customDatasources": {
"k3s": {
"defaultRegistryUrlTemplate": "https://update.k3s.io/v1-release/channels",
"transformTemplates": [
"($isVersion:=function($name){$contains($name,/^v\\d+.\\d+$/)};{\"releases\":[data[$isVersion(name)].{\"version\":latest}],\"sourceUrl\":\"https://github.com/k3s-io/k3s\",\"homepage\":\"https://k3s.io/\"})"
]
}
},
"customManagers": [
{
"customType": "regex",
"fileMatch": [".forgejo/renovate/k3s.json"],
"matchStrings": [
"matchCurrentValue\": \"!\\/^v(?<currentValue>\\d+\\.\\d+)\\/"
],
"depNameTemplate": "k3s",
"versioningTemplate": "npm",
"datasourceTemplate": "custom.k3s"
}
]
}


@ -1,205 +0,0 @@
name: build
on:
pull_request:
push:
branches:
- main
- maint/**
tags:
- v*
workflow_dispatch:
permissions:
contents: read
env:
HELM_VERSION: v3.17.2 # renovate: datasource=github-releases depName=helm packageName=helm/helm
HELM_UNITTEST_VERSION: v0.7.2 # renovate: datasource=github-releases depName=helm-unittest packageName=helm-unittest/helm-unittest
HELM_CHART_TESTING_VERSION: v3.12.0 # renovate: datasource=github-releases depName=chart-testing packageName=helm/chart-testing
KUBECTL_VERSION: v1.32.3 # renovate: datasource=github-releases depName=kubectl packageName=kubernetes/kubernetes
CT_GITHUB_GROUPS: true
jobs:
lint-node:
runs-on: docker
steps:
- run: cat /etc/os-release
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
show-progress: false
fetch-depth: 0 # Important for changelog
filter: blob:none # We don't need all blobs
- uses: ./.forgejo/actions/setup
- uses: ./.forgejo/actions/setup-node
- run: pnpm prettier
- run: pnpm markdownlint .
- run: make readme
- run: git diff --exit-code --name-only README.md
- name: changelog
run: |
pnpm changelog ${{ github.ref_type == 'tag' && 'true' || '' }}
lint-helm:
runs-on: docker
steps:
- run: cat /etc/os-release
- run: ps axf
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
show-progress: false
fetch-depth: 0
filter: blob:none # We don't need all blobs
- uses: ./.forgejo/actions/setup
- name: install chart-testing
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
with:
version: ${{ env.HELM_CHART_TESTING_VERSION }}
- name: install helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: ${{ env.HELM_VERSION }}
- name: install helm-unittest
run: helm plugin install --version ${{ env.HELM_UNITTEST_VERSION }} https://github.com/helm-unittest/helm-unittest
- run: helm dependency build
- run: yamllint -f colored .
- run: helm lint
- run: helm template --debug gitea-helm .
- run: make unittests
- run: ct lint --config tools/ct.yml --charts .
e2e:
needs:
- lint-node
- lint-helm
runs-on: k8s
strategy:
matrix:
k3s:
# https://github.com/k3s-io/k3s/branches
# oldest supported version
- v1.28.15+k3s1 # renovate: k3s
# https://github.com/k3s-io/k3s/blob/master/channel.yaml#L3-L4
# stable version
- v1.31.6+k3s1 # renovate: k3s
# newest version
- v1.32.2+k3s1 # renovate: k3s
steps:
- run: cat /etc/os-release
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
show-progress: false
fetch-depth: 0
filter: blob:none # We don't need all blobs
- uses: ./.forgejo/actions/setup
- name: install helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: ${{ env.HELM_VERSION }}
- name: Install chart-testing
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
with:
version: ${{ env.HELM_CHART_TESTING_VERSION }}
- uses: ./.forgejo/actions/setup-k3s
with:
version: ${{ matrix.k3s }}
- run: kubectl get no -o wide
- name: install chart
uses: https://github.com/nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
with:
timeout_minutes: 15
max_attempts: 3
retry_on: error
retry_wait_seconds: 120
polling_interval_seconds: 5
command: ct install --config tools/ct.yml --charts .
# # Catch-all required check for test matrix
test-success:
needs:
- lint-node
- lint-helm
- e2e
runs-on: docker
timeout-minutes: 1
if: always()
steps:
- name: Fail for failed or cancelled lint-node
if: |
needs.lint-node.result == 'failure' ||
needs.lint-node.result == 'cancelled'
run: exit 1
- name: Fail for failed or cancelled lint-helm
if: |
needs.lint-helm.result == 'failure' ||
needs.lint-helm.result == 'cancelled'
run: exit 1
- name: Fail for failed or cancelled e2e
if: |
needs.e2e.result == 'failure' ||
needs.e2e.result == 'cancelled'
run: exit 1
publish:
runs-on: docker
needs:
- test-success
if: ${{ github.ref_type == 'tag' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
show-progress: false
fetch-depth: 0 # Important for changelog
filter: blob:none # We don't need all blobs
- uses: ./.forgejo/actions/setup
- uses: ./.forgejo/actions/setup-node
- name: install helm
uses: https://github.com/azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: ${{ env.HELM_VERSION }}
- run: helm dependency build
- run: helm package --version "${GITHUB_REF_NAME#v}" -d tmp/ ./
- name: login to registries
run: |
echo ${CODEBERG_TOKEN} | helm registry login -u viceice --password-stdin codeberg.org/forgejo-contrib
echo ${FORGEJO_TOKEN} | helm registry login -u viceice --password-stdin code.forgejo.org/forgejo-contrib
echo ${FORGEJO_TOKEN} | helm registry login -u viceice --password-stdin code.forgejo.org/forgejo-helm
env:
CODEBERG_TOKEN: ${{secrets.API_TOKEN}}
FORGEJO_TOKEN: ${{secrets.FORGEJO_API_TOKEN}}
- name: publish forgejo helm chart
run: |
helm push tmp/forgejo-${GITHUB_REF_NAME#v}.tgz oci://codeberg.org/forgejo-contrib
helm push tmp/forgejo-${GITHUB_REF_NAME#v}.tgz oci://code.forgejo.org/forgejo-contrib
helm push tmp/forgejo-${GITHUB_REF_NAME#v}.tgz oci://code.forgejo.org/forgejo-helm
- name: publish forgejo release
run: pnpm forgejo:release


@ -1,26 +0,0 @@
on:
schedule:
- cron: '@hourly'
push:
branches:
- 'main'
workflow_dispatch:
jobs:
mirror:
runs-on: docker
steps:
- name: git mirror branches {main,maint/*} & tags
run: |
git init --bare .
git remote add origin https://code.forgejo.org/${{ env.GITHUB_REPOSITORY }}
git fetch origin refs/heads/main:refs/mirror/main --tags
git ls-remote origin refs/heads/main/* | while read sha full_ref ; do
ref=${full_ref#refs/heads/}
git fetch origin $full_ref:refs/mirror/$ref
done
git push --force https://any:$CODEBERG_TOKEN@codeberg.org/forgejo-contrib/forgejo-helm refs/mirror/*:refs/heads/* --tags
env:
CODEBERG_TOKEN: ${{secrets.CODEBERG_TOKEN}}


@ -0,0 +1,41 @@
<!--
Before you open the request please review the following guidelines and tips to help it be more easily integrated:
- Describe the scope of your change - i.e. what the change does.
- Describe any known limitations with your change.
- Please run any tests or examples that can exercise your modified code.
Thank you for contributing! We will try to review, test and integrate the change as soon as we can.
-->
### Description of the change
<!-- Describe the scope of your change - i.e. what the change does. -->
### Benefits
<!-- What benefits will be realized by the code change? -->
### Possible drawbacks
<!-- Describe any known limitations with your change -->
### Applicable issues
<!-- Enter any applicable Issues here (You can reference an issue using #). Please remove this section if there is no referenced issue. -->
- fixes #
### Additional information
<!-- If there's anything else that's important and relevant to your pull request, mention that information here. Please remove this section if it remains empty. -->
### ⚠ BREAKING
<!-- If there's a breaking change, please shortly describe in which way users are affected and how they can mitigate it. If there are no breakings, please remove this section. -->
### Checklist
<!-- [Place an '[X]' (no spaces) in all applicable fields. Please remove unrelated fields.] -->
- [ ] Parameters are documented in the `values.yaml` and added to the `README.md` using [readme-generator-for-helm](https://github.com/bitnami-labs/readme-generator-for-helm)
- [ ] Breaking changes are documented in the `README.md`

.gitignore (vendored, 2 lines changed)

@ -4,5 +4,3 @@ node_modules/
unittests/*/__snapshot__/
tmp/
tmpcharts/
.pnpm-store/


@ -43,23 +43,3 @@ unittests/
.woodpecker/
tmp/
artifacthub-repo.yml
ci/
.forgejo/
e2e/
.husky/
tools/
.git/
.editorconfig
.lintstagedrc.json
.editorconfig
.gitignore
.helmignore
.node-version
.prettier*
.yamllint
artifacthub*
renovate.json
pnpm-lock.yaml
.pnpm-store/


@ -1,3 +0,0 @@
#!/bin/sh
pnpm lint-staged


@ -1,6 +0,0 @@
{
"*.sh": "shellcheck",
".husky/*": "shellcheck",
"!*.{sh,md}": "prettier --cache --ignore-unknown --write",
"*.md": ["markdownlint --fix", "prettier --cache --write"]
}


@ -10,11 +10,11 @@ extends: null
# MD003/heading-style/header-style - Heading style
MD003:
# Heading style
style: 'atx'
style: "atx"
# MD004/ul-style - Unordered list style
MD004:
style: 'dash'
style: "dash"
# MD007/ul-indent - Unordered list indentation
MD007:
@ -47,7 +47,7 @@ MD013:
# Number of characters
line_length: 200
# Number of characters for headings
heading_line_length: 100
heading_line_length: 80
# Number of characters for code blocks
code_block_line_length: 80
# Include code blocks
@ -56,6 +56,8 @@ MD013:
tables: false
# Include headings
headings: true
# Include headings
headers: true
# Strict length checking
strict: false
# Stern length checking
@ -71,7 +73,7 @@ MD022:
# MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content
MD024:
# Only check sibling headings
siblings_only: true
allow_different_nesting: true
# MD025/single-title/single-h1 - Multiple top-level headings in the same document
MD025:
@ -83,12 +85,12 @@ MD025:
# MD026/no-trailing-punctuation - Trailing punctuation in heading
MD026:
# Punctuation characters
punctuation: '.,;:!。,;:!'
punctuation: ".,;:!。,;:!"
# MD029/ol-prefix - Ordered list item prefix
MD029:
# List style
style: 'one_or_ordered'
style: "one_or_ordered"
# MD030/list-marker-space - Spaces after list markers
MD030:
@ -104,17 +106,17 @@ MD030:
# MD033/no-inline-html - Inline HTML
MD033:
# Allowed elements
allowed_elements: [details, summary]
allowed_elements: []
# MD035/hr-style - Horizontal rule style
MD035:
# Horizontal rule style
style: '---'
style: "---"
# MD036/no-emphasis-as-heading/no-emphasis-as-header - Emphasis used instead of a heading
MD036:
# Punctuation characters
punctuation: '.,;:!?。,;:!?'
punctuation: ".,;:!?。,;:!?"
# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading
MD041:
@ -129,20 +131,21 @@ MD044:
names:
- Gitea
- PostgreSQL
- MariaDB
- MySQL
- Memcached
- Prometheus
- Git
- GitOps
- Forgejo
# Include code blocks
code_blocks: false
# MD046/code-block-style - Code block style
MD046:
# Block style
style: 'fenced'
style: "fenced"
# MD048/code-fence-style - Code fence style
MD048:
# Code fence style
style: 'backtick'
style: "backtick"


@ -2,6 +2,3 @@
node_modules/
charts/
.helmignore
Chart.lock
.pnpm-store/


@ -1 +0,0 @@
22.14.0

.npmrc (4 lines changed)

@ -1,5 +1 @@
engine-strict=true
# pnpm run settings
# https://pnpm.io/cli/run
shell-emulator = true


@ -1,7 +0,0 @@
Chart.lock
node_modules/
pnpm-lock.yaml
.pnpm-store/
# won't work
templates/**/*.yaml


@ -1,4 +0,0 @@
{
"singleQuote": true,
"trailingComma": "all"
}


@ -1,8 +0,0 @@
{
"recommendations": [
"yzhang.markdown-all-in-one",
"DavidAnson.vscode-markdownlint",
"Tim-Koehler.helm-intellisense",
"esbenp.prettier-vscode"
]
}

.vscode/settings.json (vendored, 12 lines changed)

@ -1,12 +0,0 @@
{
"yaml.schemas": {
"https://json.schemastore.org/github-workflow.json": [
".github/workflows/*",
".forgejo/workflows/*"
],
"https://raw.githubusercontent.com/helm-unittest/helm-unittest/v0.7.2/schema/helm-testsuite.json": [
"/unittests/**/*.yaml"
]
},
"yaml.schemaStore.enable": true
}

.woodpecker/lint.yml (new file, 50 lines)

@ -0,0 +1,50 @@
platform: linux/amd64
when:
event:
- pull_request
- tag
- push
branch:
- main
- release/**
pipeline:
deps:
image: alpine:3.17.1
pull: true
commands:
- apk add --no-cache helm
- helm repo add bitnami https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
- helm dependency build
helm-lint:
image: alpine:3.17.1
pull: true
commands:
- apk add --no-cache helm
- helm lint
helm-template:
image: alpine:3.17.1
pull: true
commands:
- apk add --no-cache helm
- helm template --debug gitea-helm .
helm-unittests:
image: alpine:3.17.1
pull: true
commands:
- apk add --no-cache make helm git bash
- helm plugin install https://github.com/quintush/helm-unittest
- helm dependency update
- make unittests
verify-readme:
image: alpine:3.17.1
pull: true
commands:
- apk add --no-cache make nodejs npm git
- make readme
- git diff --exit-code --name-only README.md


@ -0,0 +1,46 @@
platform: linux/amd64
depends_on:
- lint
when:
event:
- tag
tag: v*
pipeline:
generate-chart:
image: alpine:3.17.1
pull: true
commands:
- apk add --no-cache git nodejs npm helm
- helm repo add bitnami https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
- helm dependency build
- rm -rf tmp/
- helm package --version "${CI_COMMIT_TAG##v}" -d tmp/ ./
- npm ci
- npm run changelog "${CI_COMMIT_TAG##v}" tmp/changelog.md
secrets:
- token
publish-release:
image: plugins/gitea-release
pull: true
settings:
base_url: https://codeberg.org
api_key:
from_secret: token
files: tmp/*.tgz
title: ${CI_COMMIT_TAG##v}
file_exists: fail
note: tmp/changelog.md
publish-chart:
image: alpine:3.17.1
pull: true
commands:
- apk add --no-cache helm
- echo $${TOKEN} | helm registry login -u viceice --password-stdin codeberg.org/forgejo-contrib
- helm push tmp/forgejo-${CI_COMMIT_TAG##v}.tgz oci://codeberg.org/forgejo-contrib
secrets:
- token

.woodpecker/renovate.yml (new file, 25 lines)

@ -0,0 +1,25 @@
platform: linux/amd64
when:
event:
- cron
pipeline:
renovate:
image: ghcr.io/visualon/renovate:34.105.6
pull: true
commands:
- renovate $${CI_REPO}
environment:
RENOVATE_PLATFORM: gitea
RENOVATE_ENDPOINT: https://codeberg.org
LOG_LEVEL: debug
secrets:
- source: token
target: renovate_token
- source: gh_token
target: github_com_token
when:
- event:
- cron
cron: renovate


@ -1,20 +0,0 @@
---
extends: default
ignore: |
.yamllint
node_modules
templates
rules:
truthy:
allowed-values: ['true', 'false']
check-keys: False
level: error
line-length: disable
document-start: disable
comments:
min-spaces-from-content: 1
braces:
max-spaces-inside: 2


@ -9,16 +9,20 @@ refactorings for easier maintainability or documentation improvements.
- [`helm`](https://helm.sh/docs/intro/install/)
- `make` is optional; you may call the commands directly
When using Visual Studio Code as IDE, a [ready-to-use profile](.vscode/) is available.
When using Visual Studio Code as IDE, the following plugins might be useful:
- [Markdown All in One](https://marketplace.visualstudio.com/items?itemName=yzhang.markdown-all-in-one)
- [markdownlint](https://marketplace.visualstudio.com/items?itemName=DavidAnson.vscode-markdownlint)
- [Helm Intellisense](https://marketplace.visualstudio.com/items?itemName=Tim-Koehler.helm-intellisense)
## Documentation Requirements
The `README.md` must include all configuration options.
The parameters section is generated by extracting the parameter annotations from the `values.yaml` file using [this tool](https://github.com/bitnami-labs/readme-generator-for-helm).
The `README.md` must include all configuration options. The parameters section
is generated by extracting the parameter annotations from the `values.yaml` file
using [this tool](https://github.com/bitnami-labs/readme-generator-for-helm).
If changes were made to configuration options, run `make readme` to update the README file.
The ToC is created via the VSCode [Markdown All in One](https://marketplace.visualstudio.com/items?itemName=yzhang.markdown-all-in-one) extension which can/must also be used to update it.
If changes were made to configuration options, run `make readme` to update the
README file.
## Pull Request Requirements
@ -36,30 +40,23 @@ For local development and testing of pull requests, the following workflow can
be used (a condensed command sketch follows the list):
1. Install `minikube` and `helm`.
1. Start a `minikube` cluster via `minikube start`.
1. From the `forgejo-contrib/forgejo-helm` directory execute the following command.
This will install the dependencies listed in `Chart.yaml` and deploy the current state of the helm chart found locally.
If you want to test a branch, make sure to switch to the respective branch first.
`helm install --dependency-update forgejo . -f values.yaml`.
1. Forgejo is now deployed in `minikube`.
To access it, its port needs to be forwarded from `minikube` to localhost first via `kubectl --namespace
default port-forward svc/gitea-http 3000:3000`.
Now Forgejo is accessible at [http://localhost:3000](http://localhost:3000).
2. Start a `minikube` cluster via `minikube start`.
3. From the `gitea/helm-chart` directory execute the following command. This
will install the dependencies listed in `Chart.yaml` and deploy the current
state of the helm chart found locally. If you want to test a branch, make
sure to switch to the respective branch first.
`helm install --dependency-update gitea . -f values.yaml`.
4. Gitea is now deployed in `minikube`. To access it, its port needs to be
forwarded from `minikube` to localhost first via `kubectl --namespace
default port-forward svc/gitea-http 3000:3000`. Now Gitea is accessible at
[http://localhost:3000](http://localhost:3000).
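Putting the steps above together, a condensed command sequence might look like the following sketch. It assumes the release name `forgejo` used above; the HTTP service name depends on the release name, so check `kubectl get svc` if the port-forward target differs.

```bash
# start a local cluster
minikube start

# from the chart directory: fetch chart dependencies and deploy the local chart state
helm install --dependency-update forgejo . -f values.yaml

# forward the HTTP port to localhost (verify the service name with `kubectl get svc`)
kubectl --namespace default port-forward svc/forgejo-http 3000:3000
```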
### Unit tests
```bash
# install the unittest plugin
$ helm plugin install https://github.com/helm-unittest/helm-unittest
$ helm plugin install https://github.com/heyhabito/helm-unittest
# run the unittests
make unittests
```
See [plugin documentation](https://github.com/helm-unittest/helm-unittest/blob/main/DOCUMENT.md) for usage instructions.
## Release process
1. Create a tag following the tagging schema (see the example below this list)
1. Push the tag
1. Let CI do its work
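A minimal example of the first two steps, assuming the usual `vX.Y.Z` tag schema (the version shown is purely illustrative):

```bash
# tag the release commit and push the tag; CI packages and publishes the chart
git tag v1.2.3
git push origin v1.2.3
```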


@ -1,18 +1,15 @@
dependencies:
- name: common
repository: oci://ghcr.io/visualon/bitnamicharts
version: 2.30.0
- name: memcached
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 6.3.5
- name: mysql
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 9.4.6
- name: postgresql
repository: oci://ghcr.io/visualon/bitnamicharts
version: 16.5.6
- name: postgresql-ha
repository: oci://ghcr.io/visualon/bitnamicharts
version: 15.3.8
- name: redis-cluster
repository: oci://ghcr.io/visualon/bitnamicharts
version: 11.4.6
- name: redis
repository: oci://ghcr.io/visualon/bitnamicharts
version: 20.11.4
digest: sha256:a9c9f0779663336dd22ca4896f22bb64427e28f20aa567aee2f18474f8e31a23
generated: "2025-03-26T15:31:33.532188569Z"
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 12.1.9
- name: mariadb
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 11.4.4
digest: sha256:9d5f8b986b2cc244d32ceb6165399deaae5a4a6a6df955e2a7b0e8f36c0146a9
generated: "2023-01-19T11:57:21.482881836Z"


@ -3,8 +3,8 @@ name: forgejo
description: Forgejo Helm chart for Kubernetes
type: application
version: 0.0.0
appVersion: 10.0.3
icon: https://code.forgejo.org/forgejo/forgejo/raw/branch/forgejo/assets/logo.svg
appVersion: 1.18.1-0
icon: https://design.codeberg.org/logo-kit/icon.svg
home: https://forgejo.org/
keywords:
@ -16,41 +16,27 @@ keywords:
- gitea
- gogs
sources:
- https://code.forgejo.org/forgejo-helm/forgejo-helm
- https://codeberg.org/forgejo-contrib/forgejo-helm
- https://codeberg.org/forgejo/forgejo
maintainers:
- name: Michael Kriese
email: michael.kriese@visualon.de
# Bitnami charts are served from ghcr mirror because of rate limiting on Docker Hub
# https://hub.docker.com/u/bitnamicharts
# https://blog.bitnami.com/2023/01/bitnami-helm-charts-available-as-oci.html
# https://github.com/bitnami/charts/issues/30853
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
# Bitnami charts are served from GitHub CDN - See https://github.com/bitnami/charts/issues/10833 for details
dependencies:
# https://github.com/bitnami/charts/blob/main/bitnami/common/Chart.yaml
- name: common
repository: oci://ghcr.io/visualon/bitnamicharts
tags:
- bitnami-common
version: 2.30.0
# https://github.com/bitnami/charts/blob/main/bitnami/postgresql/Chart.yaml
- name: memcached
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 6.3.5
condition: memcached.enabled
- name: mysql
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 9.4.6
condition: mysql.enabled
- name: postgresql
repository: oci://ghcr.io/visualon/bitnamicharts
version: 16.5.6
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 12.1.9
condition: postgresql.enabled
# https://github.com/bitnami/charts/blob/main/bitnami/postgresql-ha/Chart.yaml
- name: postgresql-ha
repository: oci://ghcr.io/visualon/bitnamicharts
version: 15.3.8
condition: postgresql-ha.enabled
# https://github.com/bitnami/charts/blob/main/bitnami/redis-cluster/Chart.yaml
- name: redis-cluster
repository: oci://ghcr.io/visualon/bitnamicharts
version: 11.4.6
condition: redis-cluster.enabled
# https://github.com/bitnami/charts/blob/main/bitnami/redis/Chart.yaml
- name: redis
repository: oci://ghcr.io/visualon/bitnamicharts
version: 20.11.4
condition: redis.enabled
- name: mariadb
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
version: 11.4.4
condition: mariadb.enabled


@ -1,6 +1,5 @@
MIT License
Copyright (c) 2023 The Forgejo Authors
Copyright (c) 2020 The Gitea Authors
Copyright (c) 2020 NOVUM-RGI
Copyright (c) 2019 - 2020 Charlie Drage


@ -1,17 +1,12 @@
.PHONY: prepare-environment
prepare-environment:
pnpm install
npm install
.PHONY: readme
readme: prepare-environment
pnpm readme:parameters
pnpm readme:lint
npm run readme:parameters
npm run readme:lint
.PHONY: unittests
unittests:
helm unittest --strict -f 'unittests/**/*.yaml' ./
.PHONY: helm
update-helm-dependencies:
helm dependency update
helm unittest --helm3 --strict -f 'unittests/**/*.yaml' ./

README.md (950 lines changed; diff suppressed because it is too large)


@ -2,7 +2,7 @@
# Artifact Hub repository metadata file
# https://artifacthub.io/docs/topics/repositories/helm-charts/#oci-support
# publish via:
# oras push code.forgejo.org/forgejo-helm/forgejo:artifacthub.io --config artifacthub.config.json:application/vnd.cncf.artifacthub.config.v1+yaml artifacthub-repo.yml:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml
# oras push codeberg.org/forgejo-contrib/forgejo:artifacthub.io --config artifacthub.config.json:application/vnd.cncf.artifacthub.config.v1+yaml artifacthub-repo.yml:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml
repositoryID: 'ec84c95a-a288-4aaa-a690-a656b57e3136'
owners: # (optional, used to claim repository ownership)
- name: viceice


@ -1,20 +0,0 @@
# default values with some modifications
# Use mirror
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
global:
security:
allowInsecureImages: true
redis-cluster:
image:
registry: public.ecr.aws
postgresql-ha:
postgresql:
image:
registry: public.ecr.aws
pgpool:
image:
registry: public.ecr.aws
test:
image:
name: code.forgejo.org/oci/busybox


@ -1,27 +0,0 @@
# Test codeberg.org image
image:
registry: codeberg.org
# Use mirror
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
test:
image:
name: code.forgejo.org/oci/busybox
redis-cluster:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: false
gitea:
config:
database:
DB_TYPE: sqlite3
session:
PROVIDER: memory
cache:
ADAPTER: memory
queue:
TYPE: level


@ -1,37 +0,0 @@
redis-cluster:
enabled: false
postgresql-ha:
enabled: false
postgresql:
enabled: true
# Use mirror
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
image:
registry: public.ecr.aws
global:
security:
allowInsecureImages: true
# Use mirror
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
test:
image:
name: code.forgejo.org/oci/busybox
persistence:
enabled: true
gitea:
config:
database:
DB_TYPE: postgres
session:
PROVIDER: db
cache:
ADAPTER: memory
queue:
TYPE: level
indexer:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true


@ -1,29 +0,0 @@
image:
registry: codeberg.org
repository: forgejo-experimental/forgejo
tag: 10 # don't pin, manifests can be missing
# Use mirror
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
test:
image:
name: code.forgejo.org/oci/busybox
redis-cluster:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: false
gitea:
config:
database:
DB_TYPE: sqlite3
session:
PROVIDER: memory
cache:
ADAPTER: memory
queue:
TYPE: level


@ -1,29 +0,0 @@
image:
registry: codeberg.org
repository: forgejo-experimental/forgejo
tag: 11 # don't pin, manifests can be missing
# Use mirror
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
test:
image:
name: code.forgejo.org/oci/busybox
redis-cluster:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: false
gitea:
config:
database:
DB_TYPE: sqlite3
session:
PROVIDER: memory
cache:
ADAPTER: memory
queue:
TYPE: level


@ -1,29 +0,0 @@
image:
registry: codeberg.org
repository: forgejo-experimental/forgejo
tag: 12 # don't pin, manifests can be missing
# Use mirror
# https://code.forgejo.org/forgejo-helm/forgejo-helm/issues/1045
test:
image:
name: code.forgejo.org/oci/busybox
redis-cluster:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: false
gitea:
config:
database:
DB_TYPE: sqlite3
session:
PROVIDER: memory
cache:
ADAPTER: memory
queue:
TYPE: level


@ -1,178 +0,0 @@
# High Availability
All components (in-memory DB, volume/asset storage, code indexer) used by Forgejo must be deployed in a HA-ready fashion to achieve a full HA-ready Forgejo deployment.
The following document explains how to achieve this for all individual components.
The resulting Forgejo deployment will consist of ~ 10 pods (depending on the chosen components and their replicas).
One should evaluate upfront whether a HA-deployment is required as switching between HA/non-HA comes with some effort.
For production instances, HA is always recommended to increase uptime and have a frictionless update process.
A general comment about chart dependencies and external services:
Instead of relying on chart dependencies, it is often better to rely on external (managed) instances (in-memory database, asset storage provider, database, etc.).
Many cloud providers offer such services, at least for databases or in-memory databases.
They might cost a bit more than using a self-hosted k8s variant but are usually easier to maintain and scale, if needed.
Also they can be centrally managed and are not linked to the Forgejo helm chart or namespace.
Please consider using external services before you start with your Forgejo HA setup; it will make your life (and the life of the Forgejo maintainers) easier.
This helm chart tries to help as much as possible to simplify and assert the provisioning of a HA-ready Forgejo instance by implementing smart conditionals if `replicaCount` is set to a value > 1.
Nevertheless, we cannot guarantee for every possible combination of Forgejo settings to work together perfectly in a HA setup.
As a general advice, we recommend to have a test environment aside on which to test possible changes/upgrades before applying these to a production installation.
## Requirements for HA
Storage-wise, the HA-Forgejo setup requires a RWX file-system which can be shared among the deployment-based replica pods.
In addition, the following components are required for full HA-readiness:
- A HA-ready issue (and optionally code) indexer: `elasticsearch` or `meilisearch`
- A HA-ready external object/asset storage (`minio`) (optional, assets can also be stored on the RWX file-system)
- A HA-ready cache (`redis-cluster`)
- A HA-ready DB
`postgresql.enabled`, which defaults to `true`, must be set to `false` for a HA setup.
The default `postgresql` chart dependency is not HA-ready (there's a dedicated `postgresql-ha` chart).
A rough starting point combining these requirements is sketched below.
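This is only a shape sketch, not a drop-in configuration: the key names are assumed to match the chart's `values.yaml` and the sections below, and every component still needs the detailed settings discussed in the rest of this document.

```yaml
replicaCount: 2 # > 1 enables the chart's HA conditionals

persistence:
  enabled: true
  accessModes:
    - ReadWriteMany # shared RWX volume for all replicas

postgresql:
  enabled: false # bundled single-node database, not HA-ready
postgresql-ha:
  enabled: true # HA-ready database dependency

redis-cluster:
  enabled: true # HA-ready cache/session/queue backend

gitea:
  config:
    indexer:
      ISSUE_INDEXER_TYPE: meilisearch # plus ISSUE_INDEXER_CONN_STR, see the indexer section
      REPO_INDEXER_ENABLED: false # not yet supported by meilisearch
```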
The following sections discuss each of the components in more detail.
Note that for each component discussed, the configuration shown only provides a (working) starting point, not necessarily the most optimal setup.
We try to improve this document over time as we gain more experience with HA setups from users.
## Indexers (Issues and code/repo)
The default code indexer `bleve` does not allow multiple connections and hence cannot be used in a HA setup.
Alternatives are `elasticsearch` and `meilisearch` (as of >= 1.19.2).
Unless you have an existing `elasticsearch` cluster, we recommend using `meilisearch` as it is faster and requires way less resources.
Unfortunately, `meilisearch` only supports the `ISSUE_INDEXER` and not the `REPO_INDEXER` yet ([tracking issue](https://github.com/go-gitea/gitea/pull/24149)).
This means that the `REPO_INDEXER` must still be disabled for a HA setup right now.
An alternative to the two options above for the `ISSUE_INDEXER` is `"db"`; however, we recommend just going with `meilisearch` in this case and not bothering the DB with indexing.
To configure `meilisearch` within Forgejo, do the following:
```yml
gitea:
config:
indexer:
ISSUE_INDEXER_CONN_STR: <http://meilisearch.<namespace>.svc.cluster.local:7700>
ISSUE_INDEXER_ENABLED: true
ISSUE_INDEXER_TYPE: meilisearch
REPO_INDEXER_ENABLED: false
# REPO_INDEXER_TYPE: meilisearch # not yet working
```
Unfortunately `meilisearch` cannot be deployed in HA as of now.
Nevertheless it allows for multiple Forgejo requests at the same time and is therefore required in a HA setup.
Exemplary configuration for the [meilisearch-kubernetes](https://github.com/meilisearch/meilisearch-kubernetes/tree/main/charts/meilisearch) chart:
```yaml
persistence:
enabled: true
accessMode: ReadWriteOnce
size: 5Gi
```
## Cache, session and queue
A `redis` instance is required for the in-memory cache.
Two options exist:
- `redis`
- `redis-cluster`
The chart provides `redis-cluster` as a dependency as this one can be used for both HA and non-HA setups.
You're also welcome to go with `redis` if you prefer or already have a running instance.
It should be noted that `redis-cluster` support is only available starting with Forgejo 1.19.2.
You can also configure an external (managed) `redis` instance to be used.
To do so, you need to set the following configuration values yourself (a `values.yaml` example follows this list):
- `gitea.config.queue.TYPE`: `redis`
- `gitea.config.queue.CONN_STR`: `<your redis connection string>`
- `gitea.config.session.PROVIDER`: `redis`
- `gitea.config.session.PROVIDER_CONFIG`: `<your redis connection string>`
- `gitea.config.cache.ENABLED`: `true`
- `gitea.config.cache.ADAPTER`: `redis`
- `gitea.config.cache.HOST`: `<your redis connection string>`
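Expressed as chart values, wiring in such an external instance would look roughly like this sketch (the connection strings are placeholders for your actual Redis endpoint):

```yaml
gitea:
  config:
    queue:
      TYPE: redis
      CONN_STR: redis://:<password>@<redis-host>:6379/0
    session:
      PROVIDER: redis
      PROVIDER_CONFIG: redis://:<password>@<redis-host>:6379/0
    cache:
      ENABLED: true
      ADAPTER: redis
      HOST: redis://:<password>@<redis-host>:6379/0
```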
By default, the `redis-cluster` chart provisions three standalone master nodes of which each has a single replica.
To reduce the number of pods for a default Forgejo deployment, we opted to omit the replicas (`replicas: 0`) by default.
Only the minimum required number of master pods for a functional `redis-cluster` deployment are provisioned.
For a "proper" `redis-cluster` setup however, we recommend to set `replicas: 1` and `nodes: 6`.
## Object and asset storage
Object/asset storage refers to the storage of attachments, avatars, LFS files, etc.
While most of these can be stored on the RWX file-system, it is recommended to use an external S3-compatible object storage for them, mainly for performance reasons.
By default the chart provisions a single RWO volume to store everything (repos, avatars, packages, etc.).
This volume cannot be mounted by multiple pods.
Hence, a RWX volume is required and (optionally) an external HA-ready object storage.
> **Note:** Double-check that the file permissions are set correctly on the RWX volume! That is, everything should be owned by the `git` user, which usually has `uid=1000` and `gid=1000`.
To use `minio` you need to deploy and configure an external `minio` instance yourself and explicitly define the `STORAGE_TYPE` values as shown below.
Note that `MINIO_BUCKET` here is just a name and does not refer to an S3 bucket.
It's the root access point for all objects belonging to the respective application, i.e., to Forgejo in this case.
```yaml
gitea:
config:
attachment:
STORAGE_TYPE: minio
lfs:
STORAGE_TYPE: minio
picture:
AVATAR_STORAGE_TYPE: minio
'storage.packages':
STORAGE_TYPE: minio
storage:
MINIO_ENDPOINT: <minio-headless.<namespace>.svc.cluster.local:9000>
MINIO_LOCATION: <location>
MINIO_ACCESS_KEY_ID: <access key>
MINIO_SECRET_ACCESS_KEY: <secret key>
MINIO_BUCKET: <bucket name>
MINIO_USE_SSL: false
```
Exemplary configuration for the [bitnami minio](https://github.com/bitnami/charts/blob/main/bitnami/minio) chart:
```yaml
auth:
rootUser: minio
mode: distributed
replicaCount: 4
persistence:
enabled: true
size: 20Gi
accessModes:
- ReadWriteOnce
```
## Database
If you do not have an HA-ready DB, using a managed database service in the cloud might be the easiest and most robust solution.
Remember: disable the built-in `postgres` dependency and configure the database connection manually via `gitea.config.database`:
```yml
gitea:
database:
builtIn:
postgresql:
enabled: false
config:
database:
DB_TYPE: postgres
HOST: <host>
NAME: <name>
USER: <user>
```
## Known issues
- Currently, Cron jobs are run on all replicas, as no leader election is implemented.
See [https://github.com/go-gitea/gitea/issues/13791](https://github.com/go-gitea/gitea/issues/13791) for a discussion and possible solution.
- Running with multiple replicas slows down Forgejo a bit, i.e. page loading time increases.


@ -1,17 +0,0 @@
# based on https://github.com/kind-ci/examples
apiVersion: kind.x-k8s.io/v1alpha4
kind: Cluster
name: chart-testing
networking:
apiServerAddress: '0.0.0.0'
nodes:
# add to the apiServer certSANs the name of the drone service in order to be able to reach the cluster through it
- role: control-plane
kubeadmConfigPatches:
- |
kind: ClusterConfiguration
apiServer:
certSANs:
- docker
- role: worker

package-lock.json (generated, new file, 3645 lines; diff suppressed because it is too large)


@ -1,31 +1,22 @@
{
"name": "forgejo-helm-chart",
"homepage": "https://code.forgejo.org/forgejo-helm/forgejo-helm",
"homepage": "https://codeberg.org/forgejo-contrib/forgejo-helm.git",
"license": "MIT",
"private": true,
"engineStrict": true,
"engines": {
"node": ">=16.0.0",
"npm": ">=8.0.0"
},
"scripts": {
"changelog": "node tools/changelog.mjs",
"forgejo:release": "node tools/forgejo-release.js",
"prepare": "husky",
"prettier": "prettier --check --ignore-unknown --cache '**/*.*'",
"prettier-fix": "prettier --write --ignore-unknown --cache '**/*.*'",
"readme:lint": "markdownlint *.md -f",
"readme:parameters": "readme-generator -v values.yaml -r README.md",
"test": "helm unittest --strict -f 'unittests/**/*.yaml' ./"
"readme:parameters": "readme-generator -v values.yaml -r README.md"
},
"devDependencies": {
"@bitnami/readme-generator-for-helm": "2.7.0",
"clipanion": "3.2.1",
"conventional-changelog-conventionalcommits": "8.0.0",
"conventional-changelog-core": "9.0.0",
"husky": "9.1.7",
"lint-staged": "15.5.0",
"markdownlint-cli": "0.44.0",
"prettier": "3.5.3"
},
"packageManager": "pnpm@10.7.0",
"engines": {
"node": "^22.0.0",
"pnpm": "^10.0.0"
"@bitnami/readme-generator-for-helm": "^2.4.2",
"conventional-changelog-conventionalcommits": "^5.0.0",
"conventional-changelog-core": "^4.2.4",
"markdownlint-cli": "^0.33.0"
}
}

File diff suppressed because it is too large.


@ -1,24 +1,11 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"forgejo-contrib/forgejo-renovate//base.json",
"forgejo-helm/forgejo-helm//.forgejo/renovate/k3s.json"
],
"extends": "config:base",
"assignees": ["viceice"],
"baseBranches": ["main", "/^maint\\/.+/"],
"semanticCommits": "enabled",
"automergeStrategy": "fast-forward",
"enabledManagers": ["helmv3", "npm", "regex", "woodpecker"],
"packageRules": [
{
"description": "Separate multiple major sub chart updates",
"matchFileNames": ["Chart.yaml"],
"separateMultipleMajor": true
},
{
"description": "Require approval for major sub chart updates for maintenance branches",
"matchBaseBranches": ["/^maint\\/.+/"],
"matchUpdateTypes": ["major"],
"matchFileNames": ["Chart.yaml"],
"dependencyDashboardApproval": true
},
{
"matchManagers": ["helmv3"],
"matchUpdateTypes": ["minor", "patch"],
@ -30,108 +17,45 @@
"semanticCommitType": "feat"
},
{
"matchManagers": ["custom.regex"],
"description": "Fix forgejo docker image versioning",
"matchDepNames": ["forgejo"],
"matchDatasources": ["docker"],
"versioning": "regex:^(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)-(?<build>\\d+)(?:-(?<compatibility>.+))?$"
},
{
"matchManagers": ["regex"],
"matchDepNames": ["forgejo"],
"matchUpdateTypes": ["patch"],
"semanticCommitType": "fix"
},
{
"matchManagers": ["custom.regex"],
"matchManagers": ["regex"],
"matchDepNames": ["forgejo"],
"matchUpdateTypes": ["major", "minor"],
"semanticCommitType": "feat"
},
{
"description": "Automerge and group helm subchart updates weekly (minor & patch)",
"matchManagers": ["helmv3"],
"matchFileNames": ["Chart.yaml"],
"matchUpdateTypes": ["minor", "patch"],
"description": "Automerge renovate updates",
"matchPackageNames": ["ghcr.io/visualon/renovate"],
"automerge": true,
"groupName": "subcharts",
"extends": ["schedule:weekly"]
"platformAutomerge": true
},
{
"description": "Automerge dev deps updates",
"matchManagers": ["npm"],
"matchDepTypes": ["devDependencies"],
"matchUpdateTypes": ["minor", "patch"],
"automerge": true
},
{
"description": "Automerge node updates",
"matchManagers": ["nodenv"],
"matchUpdateTypes": ["minor", "patch"],
"automerge": true
},
{
"description": "Automerge some updates",
"matchDepNames": ["pnpm", "helm-unittest"],
"matchUpdateTypes": ["minor", "patch"],
"automerge": true
},
{
"description": "Automerge digest updates",
"matchUpdateTypes": ["digest"],
"automerge": true
},
{
"description": "Use test scope for forgejo ci tests",
"matchFileNames": ["ci/*.yaml"],
"additionalBranchPrefix": "ci-forgejo-",
"semanticCommitType": "ci",
"semanticCommitScope": "forgejo",
"groupName": "experimental docker digests",
"extends": ["schedule:daily"]
},
{
"description": "Disable updates for forgejo ci tests",
"matchFileNames": ["ci/*.yaml"],
"matchUpdateTypes": ["major", "minor", "patch"],
"enabled": false
},
{
"description": "Don't pin digests for forgejo ci tests, not supported",
"matchFileNames": ["ci/*.yaml"],
"pinDigests": false
},
{
"description": "branch automerge not possible",
"automergeType": "pr",
"matchPackageNames": ["/.+/"]
"description": "Automerge patch deps updates",
"matchManagers": ["helmv3"],
"matchFiles": ["Chart.yaml"],
"matchUpdateTypes": ["patch"],
"automerge": true,
"platformAutomerge": true
}
],
"customManagers": [
"regexManagers": [
{
"description": "Update forgeo version in chart",
"customType": "regex",
"fileMatch": ["^Chart\\.yaml$"],
"matchStrings": ["appVersion: (?<currentValue>.+?)\\s"],
"depNameTemplate": "forgejo",
"packageNameTemplate": "code.forgejo.org/forgejo/forgejo",
"packageNameTemplate": "codeberg.org/forgejo/forgejo",
"datasourceTemplate": "docker"
},
{
"description": "Detect helm-unittest yaml schema file",
"customType": "regex",
"fileMatch": [".vscode/settings\\.json$"],
"matchStrings": [
"https://raw\\.githubusercontent\\.com/helm-unittest/helm-unittest/(?<currentValue>v[0-9.]+?)/schema/helm-testsuite\\.json"
],
"depNameTemplate": "helm-unittest",
"packageNameTemplate": "helm-unittest/helm-unittest",
"datasourceTemplate": "github-releases"
},
{
"customType": "regex",
"description": "Update k3s kubernetes references",
"fileMatch": ["^\\.forgejo/workflows/[^/]+\\.ya?ml$"],
"matchStrings": [" +- (?<currentValue>.+?) # renovate: k3s\\n"],
"depNameTemplate": "k3s",
"packageNameTemplate": "k3s-io/k3s",
"datasourceTemplate": "github-releases"
}
],
"helm-values": {
"fileMatch": ["^ci/.+\\.yaml$"]
}
]
}


@ -18,19 +18,3 @@
echo "Visit http://127.0.0.1:{{ .Values.service.http.port }} to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward svc/{{ .Release.Name }}-http {{ .Values.service.http.port }}:{{ .Values.service.http.port }}
{{- end }}
{{- $warnings := list -}}
{{- if eq (get .Values.gitea.config.cache "ADAPTER") "memory" -}}
{{- $warnings = append $warnings "Forgejo uses 'memory' for caching which is not recommended for production use. See https://forgejo.org/docs/latest/admin/config-cheat-sheet/#cache-cache for available options." -}}
{{- end }}
{{- if eq (get .Values.gitea.config.queue "TYPE") "level" -}}
{{- $warnings = append $warnings "Forgejo uses 'leveldb' for queue actions which is not recommended for production use. See https://forgejo.org/docs/latest/admin/config-cheat-sheet/#queue-queue-and-queue for available options." -}}
{{- end }}
{{- if eq (get .Values.gitea.config.session "PROVIDER") "memory" -}}
{{- $warnings = append $warnings "Forgejo uses 'memory' for sessions which is not recommended for production use. See https://forgejo.org/docs/latest/admin/config-cheat-sheet/#session-session for available options." -}}
{{- end }}
{{- if gt (len $warnings) 0 }}
2. Review these warnings:
{{- range $warnings }}
- {{ . }}
{{- end }}
{{- end }}


@ -2,7 +2,6 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "gitea.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@ -32,34 +31,18 @@ Create chart name and version as used by the chart label.
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Get version from .Values.image.tag or Chart.AppVersion.
Trim optional docker digest.
*/}}
{{- define "gitea.version" -}}
{{- regexReplaceAll "@.+" (.Values.image.tag | default .Chart.AppVersion | toString) "" -}}
{{- end -}}
{{/*
Create image name and tag used by the deployment.
*/}}
{{- define "gitea.image" -}}
{{- $fullOverride := .Values.image.fullOverride | default "" -}}
{{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}}
{{- $repository := .Values.image.repository -}}
{{- $separator := ":" -}}
{{- $tag := .Values.image.tag | default .Chart.AppVersion | toString -}}
{{- $name := .Values.image.repository -}}
{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
{{- $rootless := ternary "-rootless" "" (.Values.image.rootless) -}}
{{- $digest := "" -}}
{{- if .Values.image.digest }}
{{- $digest = (printf "@%s" (.Values.image.digest | toString)) -}}
{{- end -}}
{{- if $fullOverride }}
{{- printf "%s" $fullOverride -}}
{{- else if $registry }}
{{- printf "%s/%s%s%s%s%s" $registry $repository $separator $tag $rootless $digest -}}
{{- if $registry -}}
{{- printf "%s/%s:%s%s" $registry $name $tag $rootless -}}
{{- else -}}
{{- printf "%s%s%s%s%s" $repository $separator $tag $rootless $digest -}}
{{- printf "%s:%s%s" $name $tag $rootless -}}
{{- end -}}
{{- end -}}
@ -82,7 +65,7 @@ imagePullSecrets:
Storage Class
*/}}
{{- define "gitea.persistence.storageClass" -}}
{{- $storageClass := (tpl ( default "" .Values.persistence.storageClass) .) | default (tpl ( default "" .Values.global.storageClass) .) }}
{{- $storageClass := .Values.global.storageClass | default .Values.persistence.storageClass }}
{{- if $storageClass }}
storageClassName: {{ $storageClass | quote }}
{{- end }}
@ -95,8 +78,8 @@ Common labels
helm.sh/chart: {{ include "gitea.chart" . }}
app: {{ include "gitea.name" . }}
{{ include "gitea.selectorLabels" . }}
app.kubernetes.io/version: {{ include "gitea.version" . | quote }}
version: {{ include "gitea.version" . | quote }}
app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
@ -108,46 +91,24 @@ app.kubernetes.io/name: {{ include "gitea.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "postgresql-ha.dns" -}}
{{- if (index .Values "postgresql-ha").enabled -}}
{{- printf "%s-postgresql-ha-pgpool.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "postgresql-ha" "service" "ports" "postgresql") -}}
{{- end -}}
{{- end -}}
{{- define "postgresql.dns" -}}
{{- if (index .Values "postgresql").enabled -}}
{{- printf "%s-postgresql.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain .Values.postgresql.global.postgresql.service.ports.postgresql -}}
{{- end -}}
{{- printf "%s-postgresql.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain .Values.postgresql.primary.service.ports.postgresql -}}
{{- end -}}
{{- define "redis.dns" -}}
{{- if and ((index .Values "redis-cluster").enabled) ((index .Values "redis").enabled) -}}
{{- fail "redis and redis-cluster cannot be enabled at the same time. Please only choose one." -}}
{{- else if (index .Values "redis-cluster").enabled -}}
{{- printf "redis+cluster://:%s@%s-redis-cluster-headless.%s.svc.%s:%g/0?pool_size=100&idle_timeout=180s&" (index .Values "redis-cluster").global.redis.password .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "redis-cluster").service.ports.redis -}}
{{- else if (index .Values "redis").enabled -}}
{{- printf "redis://:%s@%s-redis-headless.%s.svc.%s:%g/0?pool_size=100&idle_timeout=180s&" (index .Values "redis").global.redis.password .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "redis").master.service.ports.redis -}}
{{- end -}}
{{- define "mysql.dns" -}}
{{- printf "%s-mysql.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain .Values.mysql.primary.service.ports.mysql | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "redis.port" -}}
{{- if (index .Values "redis-cluster").enabled -}}
{{ (index .Values "redis-cluster").service.ports.redis }}
{{- else if (index .Values "redis").enabled -}}
{{ (index .Values "redis").master.service.ports.redis }}
{{- end -}}
{{- define "mariadb.dns" -}}
{{- printf "%s-mariadb.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain .Values.mariadb.primary.service.ports.mysql | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "redis.servicename" -}}
{{- if (index .Values "redis-cluster").enabled -}}
{{- printf "%s-redis-cluster-headless.%s.svc.%s" .Release.Name .Release.Namespace .Values.clusterDomain -}}
{{- else if (index .Values "redis").enabled -}}
{{- printf "%s-redis-headless.%s.svc.%s" .Release.Name .Release.Namespace .Values.clusterDomain -}}
{{- end -}}
{{- define "memcached.dns" -}}
{{- printf "%s-memcached.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain .Values.memcached.service.ports.memcached | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "gitea.default_domain" -}}
{{- printf "%s-http.%s.svc.%s" (include "gitea.fullname" .) .Release.Namespace .Values.clusterDomain -}}
{{- printf "%s-gitea.%s.svc.%s" (include "gitea.fullname" .) .Release.Namespace .Values.clusterDomain | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "gitea.ldap_settings" -}}
@ -224,12 +185,11 @@ https
{{- $_ := set $inlines $key (join "\n" $section) -}}
{{- end -}}
{{- else }}
{{- if or (eq $key "APP_NAME") (eq $key "RUN_USER") (eq $key "RUN_MODE") (eq $key "APP_SLOGAN") (eq $key "APP_DISPLAY_NAME_FORMAT") -}}
{{- if or (eq $key "APP_NAME") (eq $key "RUN_USER") (eq $key "RUN_MODE") -}}
{{- $generals = append $generals (printf "%s=%s" $key $value) -}}
{{- else -}}
{{- (printf "Key %s cannot be on top level of configuration" $key) | fail -}}
{{- end -}}
{{- end }}
{{- end }}
@ -259,18 +219,6 @@ https
{{- if not (hasKey .Values.gitea.config "oauth2") -}}
{{- $_ := set .Values.gitea.config "oauth2" dict -}}
{{- end -}}
{{- if not (hasKey .Values.gitea.config "session") -}}
{{- $_ := set .Values.gitea.config "session" dict -}}
{{- end -}}
{{- if not (hasKey .Values.gitea.config "queue") -}}
{{- $_ := set .Values.gitea.config "queue" dict -}}
{{- end -}}
{{- if not (hasKey .Values.gitea.config "queue.issue_indexer") -}}
{{- $_ := set .Values.gitea.config "queue.issue_indexer" dict -}}
{{- end -}}
{{- if not (hasKey .Values.gitea.config "indexer") -}}
{{- $_ := set .Values.gitea.config "indexer" dict -}}
{{- end -}}
{{- end -}}
{{- define "gitea.inline_configuration.defaults" -}}
@ -286,36 +234,12 @@ https
{{- if not (hasKey .Values.gitea.config.metrics "ENABLED") -}}
{{- $_ := set .Values.gitea.config.metrics "ENABLED" .Values.gitea.metrics.enabled -}}
{{- end -}}
{{- /* redis queue */ -}}
{{- if or ((index .Values "redis-cluster").enabled) ((index .Values "redis").enabled) -}}
{{- $_ := set .Values.gitea.config.queue "TYPE" "redis" -}}
{{- $_ := set .Values.gitea.config.queue "CONN_STR" (include "redis.dns" .) -}}
{{- $_ := set .Values.gitea.config.session "PROVIDER" "redis" -}}
{{- $_ := set .Values.gitea.config.session "PROVIDER_CONFIG" (include "redis.dns" .) -}}
{{- $_ := set .Values.gitea.config.cache "ADAPTER" "redis" -}}
{{- $_ := set .Values.gitea.config.cache "HOST" (include "redis.dns" .) -}}
{{- else -}}
{{- if not (get .Values.gitea.config.session "PROVIDER") -}}
{{- $_ := set .Values.gitea.config.session "PROVIDER" "memory" -}}
{{- if .Values.memcached.enabled -}}
{{- $_ := set .Values.gitea.config.cache "ENABLED" "true" -}}
{{- $_ := set .Values.gitea.config.cache "ADAPTER" "memcache" -}}
{{- if not (.Values.gitea.config.cache.HOST) -}}
{{- $_ := set .Values.gitea.config.cache "HOST" (include "memcached.dns" .) -}}
{{- end -}}
{{- if not (get .Values.gitea.config.session "PROVIDER_CONFIG") -}}
{{- $_ := set .Values.gitea.config.session "PROVIDER_CONFIG" "" -}}
{{- end -}}
{{- if not (get .Values.gitea.config.queue "TYPE") -}}
{{- $_ := set .Values.gitea.config.queue "TYPE" "level" -}}
{{- end -}}
{{- if not (get .Values.gitea.config.queue "CONN_STR") -}}
{{- $_ := set .Values.gitea.config.queue "CONN_STR" "" -}}
{{- end -}}
{{- if not (get .Values.gitea.config.cache "ADAPTER") -}}
{{- $_ := set .Values.gitea.config.cache "ADAPTER" "memory" -}}
{{- end -}}
{{- if not (get .Values.gitea.config.cache "HOST") -}}
{{- $_ := set .Values.gitea.config.cache "HOST" "" -}}
{{- end -}}
{{- end -}}
{{- if not .Values.gitea.config.indexer.ISSUE_INDEXER_TYPE -}}
{{- $_ := set .Values.gitea.config.indexer "ISSUE_INDEXER_TYPE" "db" -}}
{{- end -}}
{{- end -}}
@ -328,7 +252,7 @@ https
{{- end -}}
{{- if not (.Values.gitea.config.server.DOMAIN) -}}
{{- if gt (len .Values.ingress.hosts) 0 -}}
{{- $_ := set .Values.gitea.config.server "DOMAIN" ( tpl (index .Values.ingress.hosts 0).host $) -}}
{{- $_ := set .Values.gitea.config.server "DOMAIN" (index .Values.ingress.hosts 0).host -}}
{{- else -}}
{{- $_ := set .Values.gitea.config.server "DOMAIN" (include "gitea.default_domain" .) -}}
{{- end -}}
@ -363,23 +287,30 @@ https
{{- end -}}
{{- define "gitea.inline_configuration.defaults.database" -}}
{{- if (index .Values "postgresql-ha" "enabled") -}}
{{- $_ := set .Values.gitea.config.database "DB_TYPE" "postgres" -}}
{{- if not (.Values.gitea.config.database.HOST) -}}
{{- $_ := set .Values.gitea.config.database "HOST" (include "postgresql-ha.dns" .) -}}
{{- end -}}
{{- $_ := set .Values.gitea.config.database "NAME" (index .Values "postgresql-ha" "global" "postgresql" "database") -}}
{{- $_ := set .Values.gitea.config.database "USER" (index .Values "postgresql-ha" "global" "postgresql" "username") -}}
{{- $_ := set .Values.gitea.config.database "PASSWD" (index .Values "postgresql-ha" "global" "postgresql" "password") -}}
{{- end -}}
{{- if (index .Values "postgresql" "enabled") -}}
{{- if .Values.postgresql.enabled -}}
{{- $_ := set .Values.gitea.config.database "DB_TYPE" "postgres" -}}
{{- if not (.Values.gitea.config.database.HOST) -}}
{{- $_ := set .Values.gitea.config.database "HOST" (include "postgresql.dns" .) -}}
{{- end -}}
{{- $_ := set .Values.gitea.config.database "NAME" .Values.postgresql.global.postgresql.auth.database -}}
{{- $_ := set .Values.gitea.config.database "USER" .Values.postgresql.global.postgresql.auth.username -}}
{{- $_ := set .Values.gitea.config.database "PASSWD" .Values.postgresql.global.postgresql.auth.password -}}
{{- $_ := set .Values.gitea.config.database "NAME" .Values.postgresql.auth.database -}}
{{- $_ := set .Values.gitea.config.database "USER" .Values.postgresql.auth.username -}}
{{- $_ := set .Values.gitea.config.database "PASSWD" .Values.postgresql.auth.password -}}
{{- else if .Values.mysql.enabled -}}
{{- $_ := set .Values.gitea.config.database "DB_TYPE" "mysql" -}}
{{- if not (.Values.gitea.config.database.HOST) -}}
{{- $_ := set .Values.gitea.config.database "HOST" (include "mysql.dns" .) -}}
{{- end -}}
{{- $_ := set .Values.gitea.config.database "NAME" .Values.mysql.auth.database -}}
{{- $_ := set .Values.gitea.config.database "USER" .Values.mysql.auth.username -}}
{{- $_ := set .Values.gitea.config.database "PASSWD" .Values.mysql.auth.password -}}
{{- else if .Values.mariadb.enabled -}}
{{- $_ := set .Values.gitea.config.database "DB_TYPE" "mysql" -}}
{{- if not (.Values.gitea.config.database.HOST) -}}
{{- $_ := set .Values.gitea.config.database "HOST" (include "mariadb.dns" .) -}}
{{- end -}}
{{- $_ := set .Values.gitea.config.database "NAME" .Values.mariadb.auth.database -}}
{{- $_ := set .Values.gitea.config.database "USER" .Values.mariadb.auth.username -}}
{{- $_ := set .Values.gitea.config.database "PASSWD" .Values.mariadb.auth.password -}}
{{- end -}}
{{- end -}}
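A hedged sketch of the database values for the bundled PostgreSQL, using the paths read by the main-branch helper above (the v0.3.0 side reads postgresql.auth.* and also supports mysql/mariadb); only one of postgresql and postgresql-ha may be enabled at a time.

postgresql:
  enabled: true
  global:
    postgresql:
      auth:
        database: gitea     # illustrative credentials
        username: gitea
        password: changeme
postgresql-ha:
  enabled: false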
@ -404,15 +335,3 @@ https
{{- define "gitea.gpg-key-secret-name" -}}
{{ default (printf "%s-gpg-key" (include "gitea.fullname" .)) .Values.signing.existingSecret }}
{{- end -}}
{{- define "gitea.serviceAccountName" -}}
{{ .Values.serviceAccount.name | default (include "gitea.fullname" .) }}
{{- end -}}
{{- define "gitea.admin.passwordMode" -}}
{{- if has .Values.gitea.admin.passwordMode (tuple "keepUpdated" "initialOnlyNoReset" "initialOnlyRequireReset") -}}
{{ .Values.gitea.admin.passwordMode }}
{{- else -}}
{{ printf "gitea.admin.passwordMode must be set to one of 'keepUpdated', 'initialOnlyNoReset', or 'initialOnlyRequireReset'. Received: '%s'" .Values.gitea.admin.passwordMode | fail }}
{{- end -}}
{{- end -}}

View file

@ -2,7 +2,6 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ include "gitea.fullname" . }}-inline-config
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
type: Opaque
@ -17,42 +16,6 @@ metadata:
{{- include "gitea.labels" . | nindent 4 }}
type: Opaque
stringData:
assertions: |
{{- /*assert that only one PG dep is enabled */ -}}
{{- if and (.Values.postgresql.enabled) (index .Values "postgresql-ha" "enabled") -}}
{{- fail "Only one of postgresql or postgresql-ha can be enabled at the same time." -}}
{{- end }}
{{- /* multiple replicas assertions */ -}}
{{- if gt .Values.replicaCount 1.0 -}}
{{- if .Values.gitea.config.cron -}}
{{- if .Values.gitea.config.cron.GIT_GC_REPOS -}}
{{- if eq .Values.gitea.config.cron.GIT_GC_REPOS.ENABLED true -}}
{{ fail "Invoking the garbage collector via CRON is not yet supported when running with multiple replicas. Please set 'cron.GIT_GC_REPOS.enabled = false'." }}
{{- end }}
{{- end }}
{{- end }}
{{- if eq (first .Values.persistence.accessModes) "ReadWriteOnce" -}}
{{- fail "When using multiple replicas, a RWX file system is required and gitea.persistence.accessModes[0] must be set to ReadWriteMany." -}}
{{- end }}
{{- if .Values.gitea.config.indexer -}}
{{- if eq .Values.gitea.config.indexer.ISSUE_INDEXER_TYPE "bleve" -}}
{{- fail "When using multiple replicas, the issue indexer (gitea.config.indexer.ISSUE_INDEXER_TYPE) must be set to a HA-ready provider such as 'meilisearch', 'elasticsearch' or 'db' (if the DB is HA-ready)." -}}
{{- end }}
{{- if .Values.gitea.config.indexer.REPO_INDEXER_TYPE -}}
{{- if eq .Values.gitea.config.indexer.REPO_INDEXER_TYPE "bleve" -}}
{{- if .Values.gitea.config.indexer.REPO_INDEXER_ENABLED -}}
{{- if eq .Values.gitea.config.indexer.REPO_INDEXER_ENABLED true -}}
{{- fail "When using multiple replicas, the repo indexer (gitea.config.indexer.REPO_INDEXER_TYPE) must be set to 'meilisearch' or 'elasticsearch' or disabled." -}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
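A values sketch that satisfies these multi-replica assertions (keys as asserted above; storage class and sizes omitted):

replicaCount: 2
persistence:
  accessModes:
    - ReadWriteMany            # RWX storage is required with more than one replica
gitea:
  config:
    indexer:
      ISSUE_INDEXER_TYPE: db   # HA-ready indexer; 'bleve' would fail the assertion
    cron:
      GIT_GC_REPOS:
        ENABLED: false         # CRON-driven GC is not supported with multiple replicas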
config_environment.sh: |-
#!/usr/bin/env bash
set -euo pipefail
@ -89,18 +52,15 @@ stringData:
env2ini::log " + '${setting}'"
local masked_setting="${setting//./_0X2E_}" # '//' instructs to replace all matches
masked_setting="${masked_setting//-/_0X2D_}"
if [[ -z "${section}" ]]; then
export "FORGEJO____${masked_setting^^}=${value}" # '^^' makes the variable content uppercase
export "ENV_TO_INI____${setting^^}=${value}" # '^^' makes the variable content uppercase
return
fi
local masked_section="${section//./_0X2E_}" # '//' instructs to replace all matches
masked_section="${masked_section//-/_0X2D_}"
export "FORGEJO__${masked_section^^}__${masked_setting^^}=${value}" # '^^' makes the variable content uppercase
export "ENV_TO_INI__${masked_section^^}__${setting^^}=${value}" # '^^' makes the variable content uppercase
}
function env2ini::reload_preset_envs() {
@ -171,20 +131,18 @@ stringData:
function env2ini::generate_initial_secrets() {
# These environment variables will either be
# - overwritten with user defined values,
# - initially used to set up Forgejo
# - initially used to set up Gitea
# Anyway, they won't harm existing app.ini files
export FORGEJO__SECURITY__INTERNAL_TOKEN=$(gitea generate secret INTERNAL_TOKEN)
export FORGEJO__SECURITY__SECRET_KEY=$(gitea generate secret SECRET_KEY)
export FORGEJO__OAUTH2__JWT_SECRET=$(gitea generate secret JWT_SECRET)
export FORGEJO__SERVER__LFS_JWT_SECRET=$(gitea generate secret LFS_JWT_SECRET)
export ENV_TO_INI__SECURITY__INTERNAL_TOKEN=$(gitea generate secret INTERNAL_TOKEN)
export ENV_TO_INI__SECURITY__SECRET_KEY=$(gitea generate secret SECRET_KEY)
export ENV_TO_INI__OAUTH2__JWT_SECRET=$(gitea generate secret JWT_SECRET)
export ENV_TO_INI__SERVER__LFS_JWT_SECRET=$(gitea generate secret LFS_JWT_SECRET)
env2ini::log "...Initial secrets generated\n"
}
# save existing envs prior to script execution. Necessary to keep order of
# preexisting and custom envs
env | (grep -e '^FORGEJO__' || [[ $? == 1 ]]) > /tmp/existing-envs
env | (grep ENV_TO_INI || [[ $? == 1 ]]) > /tmp/existing-envs
# MUST BE CALLED BEFORE OTHER CONFIGURATION
env2ini::generate_initial_secrets
@ -205,10 +163,10 @@ stringData:
env2ini::log ' - oauth2.JWT_SECRET'
env2ini::log ' - server.LFS_JWT_SECRET'
unset FORGEJO__SECURITY__INTERNAL_TOKEN
unset FORGEJO__SECURITY__SECRET_KEY
unset FORGEJO__OAUTH2__JWT_SECRET
unset FORGEJO__SERVER__LFS_JWT_SECRET
unset ENV_TO_INI__SECURITY__INTERNAL_TOKEN
unset ENV_TO_INI__SECURITY__SECRET_KEY
unset ENV_TO_INI__OAUTH2__JWT_SECRET
unset ENV_TO_INI__SERVER__LFS_JWT_SECRET
fi
environment-to-ini -o $GITEA_APP_INI
environment-to-ini -o $GITEA_APP_INI -p ENV_TO_INI
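Illustrative mapping under the rules above: a key below gitea.config is exported as an environment variable and then written into app.ini by environment-to-ini (prefix FORGEJO__ on main, ENV_TO_INI__ on v0.3.0).

gitea:
  config:
    cache:
      ADAPTER: memory
      # exported roughly as FORGEJO__CACHE__ADAPTER=memory
      # (ENV_TO_INI__CACHE__ADAPTER with the -p ENV_TO_INI prefix);
      # dots and dashes in names are masked as _0X2E_ and _0X2D_ before export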

View file

@ -1,8 +0,0 @@
{{- range .Values.extraDeploy }}
---
{{- if typeIs "string" . }}
{{- tpl . $ }}
{{- else }}
{{- tpl (. | toYaml) $ }}
{{- end }}
{{- end }}

View file

@ -1,13 +1,12 @@
{{- if .Values.signing.enabled -}}
{{- if and (empty .Values.signing.privateKey) (empty .Values.signing.existingSecret) -}}
{{- fail "Either specify `signing.privateKey` or `signing.existingSecret`" -}}
{{- fail "Either specify `signing.privateKey` or `signing.existingKey`" -}}
{{- end }}
{{- if and (not (empty .Values.signing.privateKey)) (empty .Values.signing.existingSecret) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "gitea.gpg-key-secret-name" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
type: Opaque

View file

@ -2,21 +2,13 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "gitea.fullname" . }}-http
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.service.http.labels }}
{{- toYaml .Values.service.http.labels | nindent 4 }}
{{- end }}
annotations:
{{- toYaml .Values.service.http.annotations | nindent 4 }}
spec:
type: {{ .Values.service.http.type }}
{{- if eq .Values.service.http.type "LoadBalancer" }}
{{- if .Values.service.http.loadBalancerClass }}
loadBalancerClass: {{ .Values.service.http.loadBalancerClass }}
{{- end }}
{{- if and .Values.service.http.loadBalancerIP }}
{{- if and .Values.service.http.loadBalancerIP (eq .Values.service.http.type "LoadBalancer") }}
loadBalancerIP: {{ .Values.service.http.loadBalancerIP }}
{{- end }}
{{- if .Values.service.http.loadBalancerSourceRanges }}
@ -25,7 +17,6 @@ spec:
- {{ . }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.service.http.externalIPs }}
externalIPs:
{{- toYaml .Values.service.http.externalIPs | nindent 4 }}
@ -49,6 +40,6 @@ spec:
{{- if .Values.service.http.nodePort }}
nodePort: {{ .Values.service.http.nodePort }}
{{- end }}
targetPort: http
targetPort: {{ .Values.gitea.config.server.HTTP_PORT }}
selector:
{{- include "gitea.selectorLabels" . | nindent 4 }}

View file

@ -1,45 +1,58 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "gitea.fullname" . -}}
apiVersion: networking.k8s.io/v1
{{- $httpPort := .Values.service.http.port -}}
{{- $apiVersion := "extensions/v1beta1" -}}
{{- if .Values.ingress.apiVersion -}}
{{- $apiVersion = .Values.ingress.apiVersion -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}}
{{- $apiVersion = "networking.k8s.io/v1" }}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}}
{{- $apiVersion = "networking.k8s.io/v1beta1" }}
{{- end }}
apiVersion: {{ $apiVersion }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ $value | quote }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.className }}
ingressClassName: {{ tpl .Values.ingress.className . }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ tpl . $ | quote }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ tpl .host $ | quote }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if .pathType }}
{{- if and .pathType (eq $apiVersion "networking.k8s.io/v1") }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if eq $apiVersion "networking.k8s.io/v1" }}
service:
name: {{ $fullName }}-http
port:
name: http
number: {{ $httpPort }}
{{- else }}
serviceName: {{ $fullName }}-http
servicePort: {{ $httpPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
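For orientation, an ingress values sketch matching the fields read above (host names are illustrative; on the main branch className, hosts and tls hosts are additionally passed through tpl):

ingress:
  enabled: true
  className: nginx
  annotations: {}
  hosts:
    - host: git.example.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: gitea-tls
      hosts:
        - git.example.com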

View file

@ -2,7 +2,6 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ include "gitea.fullname" . }}-init
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
type: Opaque
@ -59,77 +58,22 @@ stringData:
{ # try
gitea migrate
} || { # catch
echo "Forgejo migrate might fail due to database connection...This init-container will try again in a few seconds"
echo "Gitea migrate might fail due to database connection...This init-container will try again in a few seconds"
exit 1
}
{{- if include "redis.servicename" . }}
function test_redis_connection() {
local RETRY=0
local MAX=30
echo 'Wait for redis to become available...'
until [ "${RETRY}" -ge "${MAX}" ]; do
nc -vz -w2 {{ include "redis.servicename" . }} {{ include "redis.port" . }} && break
RETRY=$[${RETRY}+1]
echo "...not ready yet (${RETRY}/${MAX})"
done
if [ "${RETRY}" -ge "${MAX}" ]; then
echo "Redis not reachable after '${MAX}' attempts!"
exit 1
fi
}
test_redis_connection
{{- end }}
{{- if or .Values.gitea.admin.existingSecret (and .Values.gitea.admin.username .Values.gitea.admin.password) }}
function configure_admin_user() {
local full_admin_list=$(gitea admin user list --admin)
local actual_user_table=''
# We might have distorted output due to warning logs, so we have to detect the actual user table by its headline and trim output above that line
local regex="(.*)(ID\s+Username\s+Email\s+IsActive.*)"
if [[ "${full_admin_list}" =~ $regex ]]; then
actual_user_table=$(echo "${BASH_REMATCH[2]}" | tail -n+2) # tail'ing to drop the table headline
else
# This code block should never be reached, as long as the output table header remains the same.
# If this code block is reached, the regex doesn't match anymore and we probably have to adjust this script.
echo "ERROR: 'configure_admin_user' was not able to determine the current list of admin users."
echo " Please review the output of 'gitea admin user list --admin' shown below."
echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
echo "DEBUG: Output of 'gitea admin user list --admin'"
echo "--"
echo "${full_admin_list}"
echo "--"
exit 1
fi
local ACCOUNT_ID=$(echo "${actual_user_table}" | grep -E "\s+${GITEA_ADMIN_USERNAME}\s+" | awk -F " " "{printf \$1}")
local ACCOUNT_ID=$(gitea admin user list --admin | grep -e "\s\+${GITEA_ADMIN_USERNAME}\s\+" | awk -F " " "{printf \$1}")
if [[ -z "${ACCOUNT_ID}" ]]; then
local -a create_args
create_args=(--admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email {{ .Values.gitea.admin.email | quote }})
if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = initialOnlyRequireReset ]]; then
create_args+=(--must-change-password=true)
else
create_args+=(--must-change-password=false)
fi
echo "No admin user '${GITEA_ADMIN_USERNAME}' found. Creating now..."
gitea admin user create "${create_args[@]}"
gitea admin user create --admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email {{ .Values.gitea.admin.email | quote }} --must-change-password=false
echo '...created.'
else
if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = keepUpdated ]]; then
echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist. Running update to sync password..."
local -a change_args
change_args=(--username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --must-change-password=false)
gitea admin user change-password "${change_args[@]}"
gitea admin user change-password --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}"
echo '...password sync done.'
else
echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist, but update mode is set to '${GITEA_ADMIN_PASSWORD_MODE}'. Skipping."
fi
fi
}
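A hedged sketch of the admin values driving this function (illustrative credentials; passwordMode and its three modes exist only on the main branch, see the gitea.admin.passwordMode helper above):

gitea:
  admin:
    username: gitea_admin
    password: changeme
    email: gitea@example.com
    passwordMode: keepUpdated          # or initialOnlyNoReset / initialOnlyRequireReset
    # existingSecret: my-admin-secret  # alternative to inline username/password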
@ -140,28 +84,7 @@ stringData:
{{- if .Values.gitea.ldap }}
{{- range $idx, $value := .Values.gitea.ldap }}
local LDAP_NAME={{ (printf "%s" $value.name) | squote }}
local full_auth_list=$(gitea admin auth list --vertical-bars)
local actual_auth_table=''
# We might have distorted output due to warning logs, so we have to detect the actual user table by its headline and trim output above that line
local regex="(.*)(ID\s+\|Name\s+\|Type\s+\|Enabled.*)"
if [[ "${full_auth_list}" =~ $regex ]]; then
actual_auth_table=$(echo "${BASH_REMATCH[2]}" | tail -n+2) # tail'ing to drop the table headline
else
# This code block should never be reached, as long as the output table header remains the same.
# If this code block is reached, the regex doesn't match anymore and we probably have to adjust this script.
echo "ERROR: 'configure_ldap' was not able to determine the current list of authentication sources."
echo " Please review the output of 'gitea admin auth list --vertical-bars' shown below."
echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
echo "DEBUG: Output of 'gitea admin auth list --vertical-bars'"
echo "--"
echo "${full_auth_list}"
echo "--"
exit 1
fi
local GITEA_AUTH_ID=$(echo "${actual_auth_table}" | grep -E "\|${LDAP_NAME}\s+\|" | grep -iE '\|LDAP \(via BindDN\)\s+\|' | awk -F " " "{print \$1}")
local GITEA_AUTH_ID=$(gitea admin auth list --vertical-bars | grep -E "\|${LDAP_NAME}\s+\|" | grep -iE '\|LDAP \(via BindDN\)\s+\|' | awk -F " " "{print \$1}")
if [[ -z "${GITEA_AUTH_ID}" ]]; then
echo "No ldap configuration found with name '${LDAP_NAME}'. Installing it now..."
@ -184,28 +107,7 @@ stringData:
{{- if .Values.gitea.oauth }}
{{- range $idx, $value := .Values.gitea.oauth }}
local OAUTH_NAME={{ (printf "%s" $value.name) | squote }}
local full_auth_list=$(gitea admin auth list --vertical-bars)
local actual_auth_table=''
# We might have distorted output due to warning logs, so we have to detect the actual user table by its headline and trim output above that line
local regex="(.*)(ID\s+\|Name\s+\|Type\s+\|Enabled.*)"
if [[ "${full_auth_list}" =~ $regex ]]; then
actual_auth_table=$(echo "${BASH_REMATCH[2]}" | tail -n+2) # tail'ing to drop the table headline
else
# This code block should never be reached, as long as the output table header remains the same.
# If this code block is reached, the regex doesn't match anymore and we probably have to adjust this script.
echo "ERROR: 'configure_oauth' was not able to determine the current list of authentication sources."
echo " Please review the output of 'gitea admin auth list --vertical-bars' shown below."
echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
echo "DEBUG: Output of 'gitea admin auth list --vertical-bars'"
echo "--"
echo "${full_auth_list}"
echo "--"
exit 1
fi
local AUTH_ID=$(echo "${actual_auth_table}" | grep -E "\|${OAUTH_NAME}\s+\|" | grep -iE '\|OAuth2\s+\|' | awk -F " " "{print \$1}")
local AUTH_ID=$(gitea admin auth list --vertical-bars | grep -E "\|${OAUTH_NAME}\s+\|" | grep -iE '\|OAuth2\s+\|' | awk -F " " "{print \$1}")
if [[ -z "${AUTH_ID}" ]]; then
echo "No oauth configuration found with name '${OAUTH_NAME}'. Installing it now..."

View file

@ -1,14 +0,0 @@
{{- if .Values.podDisruptionBudget -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "gitea.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "gitea.selectorLabels" . | nindent 6 }}
{{- toYaml .Values.podDisruptionBudget | nindent 2 }}
{{- end -}}

View file

@ -1,28 +0,0 @@
{{- if and .Values.persistence.enabled .Values.persistence.create }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ .Values.persistence.claimName }}
namespace: {{ include "common.names.namespace" . | quote }}
annotations:
{{ .Values.persistence.annotations | toYaml | indent 4}}
{{- if .Values.persistence.labels }}
labels:
{{ .Values.persistence.labels | toYaml | indent 4}}
{{- end }}
spec:
accessModes:
{{- if gt .Values.replicaCount 1.0 }}
- ReadWriteMany
{{- else }}
{{- .Values.persistence.accessModes | toYaml | nindent 4 }}
{{- end }}
volumeMode: Filesystem
{{- include "gitea.persistence.storageClass" . | nindent 2 }}
{{- with .Values.persistence.volumeName }}
volumeName: {{ . }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size }}
{{- end }}
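A persistence values sketch using the keys read by this PVC template (claim name and size are illustrative; with replicaCount above 1 the template forces ReadWriteMany):

persistence:
  enabled: true
  create: true
  claimName: gitea-shared-storage
  accessModes:
    - ReadWriteOnce
  size: 10Gi
  annotations: {}
  labels: {}
  # volumeName: my-prebound-pv   # optional explicit PV binding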

View file

@ -1,43 +0,0 @@
{{- if .Values.route.enabled -}}
apiVersion: route.openshift.io/v1
kind: Route
metadata:
name: {{ include "gitea.fullname" . }}-http
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
annotations:
{{- toYaml .Values.route.annotations | nindent 4 }}
spec:
{{- if .Values.route.host }}
host: {{ tpl .Values.route.host $ | quote }}
{{- end }}
{{- if .Values.route.wildcardPolicy }}
wildcardPolicy: {{ .Values.route.wildcardPolicy }}
{{- end }}
to:
kind: Service
name: {{ include "gitea.fullname" . }}-http
weight: 100
port:
targetPort: http
tls:
termination: edge
insecureEdgeTerminationPolicy: Redirect
{{- if .Values.route.tls.existingSecret }}
externalCertificate: {{ .Values.route.tls.existingSecret }}
{{- else if and .Values.route.tls.certificate
.Values.route.tls.privateKey
.Values.route.tls.caCertificate }}
certificate: |
{{ .Values.route.tls.certificate | indent 6 }}
key: |
{{ .Values.route.tls.privateKey | indent 6 }}
caCertificate: |
{{ .Values.route.tls.caCertificate | indent 6 }}
{{- else if or .Values.route.tls.certificate
.Values.route.tls.privateKey
.Values.route.tls.caCertificate }}
{{- fail "certificate, privateKey and caCertificate must be specified together" }}
{{- end }}
{{- end }}
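A hedged sketch of the OpenShift route values consumed above (host is illustrative; certificate, privateKey and caCertificate must be given together, or an existingSecret used instead):

route:
  enabled: true
  host: git.example.com
  annotations: {}
  tls:
    existingSecret: gitea-route-tls
    # certificate: |
    #   -----BEGIN CERTIFICATE-----
    #   ...
    #   -----END CERTIFICATE-----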

View file

@ -1,21 +0,0 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gitea.serviceAccountName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.labels }}
{{- . | toYaml | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- . | toYaml | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- with .Values.serviceAccount.imagePullSecrets }}
imagePullSecrets:
{{- . | toYaml | nindent 2 }}
{{- end }}
{{- end }}
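A serviceAccount values sketch for the template above (when name is empty, the chart fullname is used per the gitea.serviceAccountName helper; the token setting shown is illustrative):

serviceAccount:
  create: true
  name: ""
  automountServiceAccountToken: false
  labels: {}
  annotations: {}
  imagePullSecrets: []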

View file

@ -3,7 +3,6 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "gitea.fullname" . }}
namespace: {{ default (include "common.names.namespace" .) .Values.gitea.metrics.serviceMonitor.namespace | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.gitea.metrics.serviceMonitor.additionalLabels }}

View file

@ -2,20 +2,13 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "gitea.fullname" . }}-ssh
namespace: {{ include "common.names.namespace" . | quote }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.service.ssh.labels }}
{{- toYaml .Values.service.ssh.labels | nindent 4 }}
{{- end }}
annotations:
{{- toYaml .Values.service.ssh.annotations | nindent 4 }}
spec:
type: {{ .Values.service.ssh.type }}
{{- if eq .Values.service.ssh.type "LoadBalancer" }}
{{- if .Values.service.ssh.loadBalancerClass }}
loadBalancerClass: {{ .Values.service.ssh.loadBalancerClass }}
{{- end }}
{{- if .Values.service.ssh.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.ssh.loadBalancerIP }}
{{- end -}}
@ -46,9 +39,7 @@ spec:
ports:
- name: ssh
port: {{ .Values.service.ssh.port }}
{{- if .Values.gitea.config.server.SSH_LISTEN_PORT }}
targetPort: ssh
{{- end }}
targetPort: {{ .Values.gitea.config.server.SSH_LISTEN_PORT }}
protocol: TCP
{{- if .Values.service.ssh.nodePort }}
nodePort: {{ .Values.service.ssh.nodePort }}

View file

@ -1,32 +1,22 @@
apiVersion: apps/v1
kind: Deployment
kind: StatefulSet
metadata:
name: {{ include "gitea.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
annotations:
{{- if .Values.deployment.annotations }}
{{- toYaml .Values.deployment.annotations | nindent 4 }}
{{- if .Values.statefulset.annotations }}
{{- toYaml .Values.statefulset.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.deployment.labels }}
{{- toYaml .Values.deployment.labels | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
type: {{ .Values.strategy.type }}
{{- if eq .Values.strategy.type "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.strategy.rollingUpdate.maxUnavailable }}
maxSurge: {{ .Values.strategy.rollingUpdate.maxSurge }}
{{- end }}
selector:
matchLabels:
{{- include "gitea.selectorLabels" . | nindent 6 }}
{{- if .Values.deployment.labels }}
{{- toYaml .Values.deployment.labels | nindent 6 }}
{{- if .Values.statefulset.labels }}
{{- toYaml .Values.statefulset.labels | nindent 6 }}
{{- end }}
serviceName: {{ include "gitea.fullname" . }}
template:
metadata:
annotations:
@ -42,22 +32,16 @@ spec:
{{- end }}
labels:
{{- include "gitea.labels" . | nindent 8 }}
{{- if .Values.deployment.labels }}
{{- toYaml .Values.deployment.labels | nindent 8 }}
{{- if .Values.statefulset.labels }}
{{- toYaml .Values.statefulset.labels | nindent 8 }}
{{- end }}
spec:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- if (or .Values.serviceAccount.create .Values.serviceAccount.name) }}
serviceAccountName: {{ include "gitea.serviceAccountName" . }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- include "gitea.images.pullSecrets" . | nindent 6 }}
securityContext:
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }}
{{- toYaml .Values.podSecurityContext | nindent 8 }}
initContainers:
- name: init-directories
image: "{{ include "gitea.image" . }}"
@ -72,8 +56,8 @@ spec:
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
{{- if .Values.deployment.env }}
{{- toYaml .Values.deployment.env | nindent 12 }}
{{- if .Values.statefulset.env }}
{{- toYaml .Values.statefulset.env | nindent 12 }}
{{- end }}
{{- if .Values.signing.enabled }}
- name: GNUPGHOME
@ -91,9 +75,7 @@ spec:
{{- end }}
{{- include "gitea.init-additional-mounts" . | nindent 12 }}
securityContext:
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
resources:
{{- toYaml .Values.initContainers.resources | nindent 12 }}
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
- name: init-app-ini
image: "{{ include "gitea.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
@ -107,8 +89,8 @@ spec:
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
{{- if .Values.deployment.env }}
{{- toYaml .Values.deployment.env | nindent 12 }}
{{- if .Values.statefulset.env }}
{{- toYaml .Values.statefulset.env | nindent 12 }}
{{- end }}
{{- if .Values.gitea.additionalConfigFromEnvs }}
{{- toYaml .Values.gitea.additionalConfigFromEnvs | nindent 12 }}
@ -131,9 +113,7 @@ spec:
{{- end }}
{{- include "gitea.init-additional-mounts" . | nindent 12 }}
securityContext:
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
resources:
{{- toYaml .Values.initContainers.resources | nindent 12 }}
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
{{- if .Values.signing.enabled }}
- name: configure-gpg
image: "{{ include "gitea.image" . }}"
@ -145,7 +125,7 @@ spec:
{{- if not (hasKey $csc "runAsUser") -}}
{{- $_ := set $csc "runAsUser" 1000 -}}
{{- end -}}
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" $csc "context" $) | nindent 12 }}
{{- toYaml $csc | nindent 12 }}
env:
- name: GNUPGHOME
value: {{ .Values.signing.gpgHome }}
@ -163,8 +143,6 @@ spec:
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.initContainers.resources | nindent 12 }}
{{- end }}
- name: configure-gitea
image: "{{ include "gitea.image" . }}"
@ -176,7 +154,7 @@ spec:
{{- if not (hasKey $csc "runAsUser") -}}
{{- $_ := set $csc "runAsUser" 1000 -}}
{{- end -}}
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" $csc "context" $) | nindent 12 }}
{{- toYaml $csc | nindent 12 }}
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
@ -186,10 +164,6 @@ spec:
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
{{- if .Values.image.rootless }}
- name: HOME
value: /data/gitea/git
{{- end }}
{{- if .Values.gitea.ldap }}
{{- range $idx, $value := .Values.gitea.ldap }}
{{- if $value.existingSecret }}
@ -244,10 +218,8 @@ spec:
- name: GITEA_ADMIN_PASSWORD
value: {{ .Values.gitea.admin.password | quote }}
{{- end }}
- name: GITEA_ADMIN_PASSWORD_MODE
value: {{ include "gitea.admin.passwordMode" $ }}
{{- if .Values.deployment.env }}
{{- toYaml .Values.deployment.env | nindent 12 }}
{{- if .Values.statefulset.env }}
{{- toYaml .Values.statefulset.env | nindent 12 }}
{{- end }}
volumeMounts:
- name: init
@ -260,9 +232,7 @@ spec:
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- include "gitea.init-additional-mounts" . | nindent 12 }}
resources:
{{- toYaml .Values.initContainers.resources | nindent 12 }}
terminationGracePeriodSeconds: {{ .Values.deployment.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.statefulset.terminationGracePeriodSeconds }}
containers:
- name: {{ .Chart.Name }}
image: "{{ include "gitea.image" . }}"
@ -273,10 +243,6 @@ spec:
value: {{ .Values.gitea.config.server.SSH_LISTEN_PORT | quote }}
- name: SSH_PORT
value: {{ .Values.gitea.config.server.SSH_PORT | quote }}
{{- if not .Values.image.rootless }}
- name: SSH_LOG_LEVEL
value: {{ .Values.gitea.ssh.logLevel | quote }}
{{- end }}
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
@ -287,16 +253,12 @@ spec:
value: /tmp/gitea
- name: TMPDIR
value: /tmp/gitea
{{- if .Values.image.rootless }}
- name: HOME
value: /data/gitea/git
{{- end }}
{{- if .Values.signing.enabled }}
- name: GNUPGHOME
value: {{ .Values.signing.gpgHome }}
{{- end }}
{{- if .Values.deployment.env }}
{{- toYaml .Values.deployment.env | nindent 12 }}
{{- if .Values.statefulset.env }}
{{- toYaml .Values.statefulset.env | nindent 12 }}
{{- end }}
ports:
- name: ssh
@ -327,9 +289,9 @@ spec:
securityContext:
{{- /* Honor the deprecated securityContext variable when defined */ -}}
{{- if .Values.containerSecurityContext -}}
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
{{ toYaml .Values.containerSecurityContext | nindent 12 -}}
{{- else -}}
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.securityContext "context" $) | nindent 12 }}
{{ toYaml .Values.securityContext | nindent 12 -}}
{{- end }}
volumeMounts:
- name: temp
@ -340,10 +302,6 @@ spec:
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- include "gitea.container-additional-mounts" . | nindent 12 }}
{{- with .Values.global.hostAliases }}
hostAliases:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@ -352,10 +310,6 @@ spec:
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
@ -394,13 +348,38 @@ spec:
path: private.asc
defaultMode: 0100
{{- end }}
{{- if .Values.persistence.enabled }}
{{- if .Values.persistence.mount }}
{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}
- name: data
persistentVolumeClaim:
claimName: {{ .Values.persistence.claimName }}
{{- with .Values.persistence.existingClaim }}
claimName: {{ tpl . $ }}
{{- end }}
{{- else if not .Values.persistence.enabled }}
- name: data
emptyDir: {}
{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
volumeClaimTemplates:
- metadata:
name: data
{{- with .Values.persistence.annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
{{- with .Values.persistence.labels }}
labels:
{{- range $key, $value := . }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
{{- include "gitea.persistence.storageClass" . | indent 8 }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- end }}

View file

@ -1,4 +1,3 @@
{{- if .Values.test.enabled }}
apiVersion: v1
kind: Pod
metadata:
@ -6,12 +5,11 @@ metadata:
labels:
{{ include "gitea.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: "{{ .Values.test.image.name }}:{{ .Values.test.image.tag }}"
image: busybox
command: ['wget']
args: ['{{ include "gitea.fullname" . }}-http:{{ .Values.service.http.port }}']
restartPolicy: Never
{{- end }}
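A minimal sketch of the test values referenced above (image name and tag are configurable on the main branch; v0.3.0 hardcodes busybox):

test:
  enabled: true
  image:
    name: busybox
    tag: latest    # illustrative tag; pin a specific version in practice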

View file

@ -1,12 +1,71 @@
import { getChangelog } from './changelog/util.js';
import conventionalChangelogCore from 'conventional-changelog-core';
import conventionalChangelogPreset from 'conventional-changelog-conventionalcommits';
import fs from 'node:fs';
const stream = getChangelog(!!process.argv[2]).setEncoding('utf8');
const config = conventionalChangelogPreset({
types: [
{
type: 'feat',
section: 'Features',
},
{
type: 'feature',
section: 'Features',
},
{
type: 'fix',
section: 'Bug Fixes',
},
{
type: 'perf',
section: 'Performance Improvements',
},
{
type: 'revert',
section: 'Reverts',
},
{
type: 'docs',
section: 'Documentation',
},
{
type: 'style',
section: 'Styles',
},
{
type: 'chore',
section: 'Miscellaneous Chores',
},
{
type: 'refactor',
section: 'Code Refactoring',
},
{
type: 'test',
section: 'Tests',
},
{
type: 'build',
section: 'Build System',
},
{
type: 'ci',
section: 'Continuous Integration',
},
],
});
const changes = (await stream.toArray()).join('');
const file = process.argv[3]
? fs.createWriteStream(process.argv[3])
: process.stdout;
if (!changes.length) {
console.error('No changelog found');
process.exit(1);
}
process.stdout.write(changes);
conventionalChangelogCore(
{
config,
releaseCount: 2,
},
{ version: process.argv[2], linkCompare: false },
undefined,
undefined,
{ headerPartial: '' }
).pipe(file);

View file

@ -1,73 +0,0 @@
import conventionalChangelogPreset from 'conventional-changelog-conventionalcommits';
import conventionalChangelogCore from 'conventional-changelog-core';
/**
* @type {import('conventional-changelog-core').Options}
*/
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-call
export const config = conventionalChangelogPreset({
types: [
{
type: 'feat',
section: 'Features',
},
{
type: 'fix',
section: 'Bug Fixes',
},
{
type: 'perf',
section: 'Performance Improvements',
},
{
type: 'revert',
section: 'Reverts',
},
{
type: 'docs',
section: 'Documentation',
},
{
type: 'style',
section: 'Styles',
},
{
type: 'refactor',
section: 'Code Refactoring',
},
{
type: 'test',
section: 'Tests',
},
{
type: 'build',
section: 'Build System',
},
{
type: 'ci',
section: 'Continuous Integration',
},
{
type: 'chore',
section: 'Miscellaneous Chores',
},
],
});
/**
*
* @param {boolean|undefined} onTag
* @returns
*/
export function getChangelog(onTag = false) {
return conventionalChangelogCore(
{
config,
releaseCount: onTag ? 2 : 1,
},
undefined,
undefined,
undefined,
{ headerPartial: '' },
);
}

View file

@ -1,7 +0,0 @@
# https://github.com/helm/chart-testing/blob/main/doc/ct_install.md
helm-extra-args: --timeout 3m
check-version-increment: false
debug: true
target-branch: main
lint-conf: .yamllint
validate-maintainers: false # does not work with gitea

View file

@ -1,100 +0,0 @@
import { Command, runExit } from 'clipanion';
import { getChangelog } from './changelog/util.js';
class GiteaReleaseCommand extends Command {
async execute() {
const api = process.env.GITHUB_API_URL;
const repo = process.env.GITHUB_REPOSITORY;
const token = process.env.GITHUB_TOKEN;
const tag = process.env.GITHUB_REF_NAME;
if (!api) {
this.context.stdout.write(
'GITHUB_API_URL environment variable not set.\n',
);
return 1;
} else {
this.context.stdout.write(`Using api: ${api}.\n`);
}
if (!token) {
this.context.stdout.write('GITHUB_TOKEN environment variable not set.\n');
return 1;
}
if (!repo) {
this.context.stdout.write(
'GITHUB_REPOSITORY environment variable not set.\n',
);
return 1;
} else {
this.context.stdout.write(`Using repository: ${repo}.\n`);
}
if (!tag) {
this.context.stdout.write(
'GITHUB_REF_NAME environment variable not set.',
);
return 1;
} else {
this.context.stdout.write(`Using tag: ${tag}.\n`);
}
this.context.stdout.write(`Checking remote tag ${tag}.\n`);
let resp = await fetch(`${api}/repos/${repo}/tags/${tag}`, {
headers: {
Authorization: `Bearer ${token}`,
},
});
if (!resp.ok) {
this.context.stdout.write(`Tag ${tag} not found on remote.\n`);
return 1;
}
this.context.stdout.write(`Checking remote release ${tag}.\n`);
resp = await fetch(`${api}/repos/${repo}/releases/tags/${tag}`, {
headers: {
Authorization: `Bearer ${token}`,
},
});
if (resp.ok) {
this.context.stdout.write(`Release ${tag} already exists.\n`);
return 1;
} else if (resp.status !== 404) {
this.context.stdout.write(
`Error checking for release ${tag}.\n${resp.status}: ${resp.statusText}\n`,
);
return 1;
}
const stream = getChangelog(true).setEncoding('utf8');
const changes = (await stream.toArray()).join('');
this.context.stdout.write(`Creating release ${tag}.\n`);
resp = await fetch(`${api}/repos/${repo}/releases`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`,
},
body: JSON.stringify({
draft: false,
prerelease: tag.includes('-'),
tag_name: tag,
name: tag.replace(/^v/, ''),
body: changes,
target_commitish: 'main',
}),
});
if (!resp.ok) {
this.context.stdout.write(
`Error creating release ${tag}.\n${resp.status}: ${resp.statusText}\n`,
);
return 1;
}
}
}
void runExit(GiteaReleaseCommand);

View file

@ -1,3 +0,0 @@
{
"type": "module"
}

View file

@ -1,66 +0,0 @@
suite: config template | cache config
release:
name: gitea-unittests
namespace: testing
tests:
- it: 'cache is configured correctly for redis-cluster'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: true
redis:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=redis
HOST=redis+cluster://:@gitea-unittests-redis-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: 'cache is configured correctly for redis'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=redis
HOST=redis://:changeme@gitea-unittests-redis-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "cache is configured correctly for 'memory' when redis (or redis-cluster) is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=memory
HOST=
- it: 'cache can be customized when redis (or redis-cluster) is disabled'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: false
gitea.config.cache.ADAPTER: custom-adapter
gitea.config.cache.HOST: custom-host
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=custom-adapter
HOST=custom-host

View file

@ -1,30 +0,0 @@
suite: config template | database section (postgresql-ha)
release:
name: gitea-unittests
namespace: testing
tests:
- it: connects to pgpool service
template: templates/gitea/config.yaml
set:
postgresql:
enabled: false
postgresql-ha:
enabled: true
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: HOST=gitea-unittests-postgresql-ha-pgpool.testing.svc.cluster.local:5432
- it: renders the referenced service
template: charts/postgresql-ha/templates/pgpool/service.yaml
set:
postgresql:
enabled: false
postgresql-ha:
enabled: true
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-postgresql-ha-pgpool
namespace: testing

View file

@ -1,30 +0,0 @@
suite: config template | database section (postgresql)
release:
name: gitea-unittests
namespace: testing
tests:
- it: 'connects to postgresql service'
template: templates/gitea/config.yaml
set:
postgresql:
enabled: true
postgresql-ha:
enabled: false
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: HOST=gitea-unittests-postgresql.testing.svc.cluster.local:5432
- it: 'renders the referenced service'
template: charts/postgresql/templates/primary/svc.yaml
set:
postgresql:
enabled: true
postgresql-ha:
enabled: false
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-postgresql
namespace: testing

View file

@ -1,66 +0,0 @@
suite: config template | queue config
release:
name: gitea-unittests
namespace: testing
tests:
- it: 'queue is configured correctly for redis-cluster'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: true
redis:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=redis+cluster://:@gitea-unittests-redis-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
TYPE=redis
- it: 'queue is configured correctly for redis'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=redis://:changeme@gitea-unittests-redis-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
TYPE=redis
- it: "queue is configured correctly for 'levelDB' when redis (and redis-cluster) is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=
TYPE=level
- it: 'queue can be customized when redis (and redis-cluster) are disabled'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: false
gitea.config.queue.TYPE: custom-type
gitea.config.queue.CONN_STR: custom-connection-string
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=custom-connection-string
TYPE=custom-type

View file

@ -1,67 +0,0 @@
suite: config template | server section (domain related)
release:
name: gitea-unittests
namespace: testing
tests:
- it: '[default values] uses ingress host for DOMAIN|SSH_DOMAIN|ROOT_URL'
template: templates/gitea/config.yaml
asserts:
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nDOMAIN=git.example.com
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nSSH_DOMAIN=git.example.com
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nROOT_URL=http://git.example.com
################################################
- it: '[no ingress hosts] uses gitea http service for DOMAIN|SSH_DOMAIN|ROOT_URL'
template: templates/gitea/config.yaml
set:
ingress:
hosts: []
asserts:
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nDOMAIN=gitea-unittests-forgejo-http.testing.svc.cluster.local
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nSSH_DOMAIN=gitea-unittests-forgejo-http.testing.svc.cluster.local
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nROOT_URL=http://gitea-unittests-forgejo-http.testing.svc.cluster.local
################################################
- it: '[provided via values] uses that for DOMAIN|SSH_DOMAIN|ROOT_URL'
template: templates/gitea/config.yaml
set:
gitea.config.server.DOMAIN: provided.example.com
ingress:
hosts:
- host: non-used.example.com
paths:
- path: /
pathType: Prefix
asserts:
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nDOMAIN=provided.example.com
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nSSH_DOMAIN=provided.example.com
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: \nROOT_URL=http://provided.example.com

View file

@ -1,66 +0,0 @@
suite: config template | session config
release:
name: gitea-unittests
namespace: testing
tests:
- it: 'session is configured correctly for redis-cluster'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: true
redis:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=redis
PROVIDER_CONFIG=redis+cluster://:@gitea-unittests-redis-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: 'session is configured correctly for redis'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=redis
PROVIDER_CONFIG=redis://:changeme@gitea-unittests-redis-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "session is configured correctly for 'memory' when redis (and redis-cluster) is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=memory
PROVIDER_CONFIG=
- it: 'session can be customized when redis (and redis-cluster) is disabled'
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: false
gitea.config.session.PROVIDER: custom-provider
gitea.config.session.PROVIDER_CONFIG: custom-provider-config
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=custom-provider
PROVIDER_CONFIG=custom-provider-config

View file

@ -1,57 +0,0 @@
suite: Dependency update consistency
release:
name: gitea-unittests
namespace: testing
tests:
- it: '[postgresql-ha] ensures we detect major image version upgrades'
template: charts/postgresql-ha/templates/postgresql/statefulset.yaml
set:
postgresql:
enabled: false
postgresql-ha:
enabled: true
asserts:
- documentIndex: 0
matchRegex:
path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: ^docker.io/bitnami/postgresql-repmgr:17.+$
- it: '[postgresql] ensures we detect major image version upgrades'
template: charts/postgresql/templates/primary/statefulset.yaml
set:
postgresql:
enabled: true
postgresql-ha:
enabled: false
asserts:
- documentIndex: 0
matchRegex:
path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: ^docker.io/bitnami/postgresql:17.+$
- it: '[redis-cluster] ensures we detect major image version upgrades'
template: charts/redis-cluster/templates/redis-statefulset.yaml
set:
redis-cluster:
enabled: true
redis:
enabled: false
asserts:
- documentIndex: 0
matchRegex:
path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: bitnami/redis-cluster:7.+$
- it: '[redis] ensures we detect major image version upgrades'
template: charts/redis/templates/master/application.yaml
set:
redis-cluster:
enabled: false
redis:
enabled: true
asserts:
- documentIndex: 0
matchRegex:
path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: bitnami/redis:7.+$

View file

@ -1,59 +0,0 @@
suite: deployment template (HA)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: fails with multiple replicas and "GIT_GC_REPOS" enabled
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
persistence:
accessModes:
- ReadWriteMany
gitea:
config:
cron:
GIT_GC_REPOS:
ENABLED: true
asserts:
- failedTemplate:
errorMessage: "Invoking the garbage collector via CRON is not yet supported when running with multiple replicas. Please set 'cron.GIT_GC_REPOS.enabled = false'."
- it: fails with multiple replicas and RWX file system not set
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
asserts:
- failedTemplate:
errorMessage: 'When using multiple replicas, a RWX file system is required and gitea.persistence.accessModes[0] must be set to ReadWriteMany.'
- it: fails with multiple replicas and bleve issue indexer
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
persistence:
accessModes:
- ReadWriteMany
gitea:
config:
indexer:
ISSUE_INDEXER_TYPE: bleve
asserts:
- failedTemplate:
errorMessage: "When using multiple replicas, the issue indexer (gitea.config.indexer.ISSUE_INDEXER_TYPE) must be set to a HA-ready provider such as 'meilisearch', 'elasticsearch' or 'db' (if the DB is HA-ready)."
- it: fails with multiple replicas and bleve repo indexer
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
persistence:
accessModes:
- ReadWriteMany
gitea:
config:
indexer:
REPO_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true
asserts:
- failedTemplate:
errorMessage: "When using multiple replicas, the repo indexer (gitea.config.indexer.REPO_INDEXER_TYPE) must be set to 'meilisearch' or 'elasticsearch' or disabled."

View file

@ -1,31 +0,0 @@
suite: deployment template (basic)
release:
name: forgejo-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: renders a deployment
template: templates/gitea/deployment.yaml
asserts:
- hasDocuments:
count: 1
- containsDocument:
kind: Deployment
apiVersion: apps/v1
name: forgejo-unittests
- it: deployment labels are set
template: templates/gitea/deployment.yaml
set:
deployment.labels:
hello: world
asserts:
- isSubset:
path: metadata.labels
content:
hello: world
- isSubset:
path: spec.template.metadata.labels
content:
hello: world

View file

@ -1,110 +0,0 @@
suite: deployment template (image configuration)
release:
name: gitea-unittests
namespace: testing
chart:
# Override appVersion to be consistent with used digest :)
appVersion: 1.19.3
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: default values
template: templates/gitea/deployment.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'code.forgejo.org/forgejo/forgejo:1.19.3-rootless'
- it: tag override
template: templates/gitea/deployment.yaml
set:
image.tag: '1.19.4'
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'code.forgejo.org/forgejo/forgejo:1.19.4-rootless'
- it: root-based image
template: templates/gitea/deployment.yaml
set:
image.rootless: false
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'code.forgejo.org/forgejo/forgejo:1.19.3'
- it: scoped registry
template: templates/gitea/deployment.yaml
set:
image.registry: 'example.com'
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'example.com/forgejo/forgejo:1.19.3-rootless'
- it: global registry
template: templates/gitea/deployment.yaml
set:
global.imageRegistry: 'global.example.com'
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'global.example.com/forgejo/forgejo:1.19.3-rootless'
- it: digest for rootless image
template: templates/gitea/deployment.yaml
set:
image:
rootless: true
digest: sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'code.forgejo.org/forgejo/forgejo:1.19.3-rootless@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a'
- it: image fullOverride (does not append rootless)
template: templates/gitea/deployment.yaml
set:
image:
fullOverride: gitea/gitea:1.19.3
# setting rootless, registry, repository, tag, and digest to prove that override works
rootless: true
registry: example.com
repository: example/image
tag: '1.0.0'
digest: sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'gitea/gitea:1.19.3'
- it: digest for root-based image
template: templates/gitea/deployment.yaml
set:
image:
rootless: false
digest: sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'code.forgejo.org/forgejo/forgejo:1.19.3@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a'
- it: digest and global registry
template: templates/gitea/deployment.yaml
set:
global.imageRegistry: 'global.example.com'
image.digest: 'sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a'
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: 'global.example.com/forgejo/forgejo:1.19.3-rootless@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a'
- it: correctly renders floating tag references
template: templates/gitea/deployment.yaml
set:
image.tag: 1.21 # use non-quoted value on purpose. See: https://gitea.com/gitea/helm-chart/issues/631
asserts:
- equal:
path: spec.template.spec.initContainers[0].image
value: 'code.forgejo.org/forgejo/forgejo:1.21-rootless'
- equal:
path: spec.template.spec.initContainers[1].image
value: 'code.forgejo.org/forgejo/forgejo:1.21-rootless'
- equal:
path: spec.template.spec.initContainers[2].image
value: 'code.forgejo.org/forgejo/forgejo:1.21-rootless'
- equal:
path: spec.template.spec.containers[0].image
value: 'code.forgejo.org/forgejo/forgejo:1.21-rootless'

View file

@ -1,47 +0,0 @@
suite: ingress template
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/ingress.yaml
tests:
- it: hostname using TPL
set:
global.giteaHostName: 'gitea.example.com'
ingress.enabled: true
ingress.hosts[0].host: '{{ .Values.global.giteaHostName }}'
ingress.tls:
- secretName: gitea-tls
hosts:
- '{{ .Values.global.giteaHostName }}'
asserts:
- isKind:
of: Ingress
- equal:
path: spec.tls[0].hosts[0]
value: 'gitea.example.com'
- equal:
path: spec.rules[0].host
value: 'gitea.example.com'
- it: Ingress Class using TPL
set:
global.ingress.className: 'ingress-class'
ingress.className: '{{ .Values.global.ingress.className }}'
ingress.enabled: true
ingress.hosts[0].host: 'some-host'
ingress.tls:
- secretName: gitea-tls
hosts:
- 'some-host'
asserts:
- isKind:
of: Ingress
- equal:
path: spec.tls[0].hosts[0]
value: 'some-host'
- equal:
path: spec.rules[0].host
value: 'some-host'
- equal:
path: spec.ingressClassName
value: 'ingress-class'

View file

@ -1,33 +0,0 @@
suite: config template
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/config.yaml
tests:
- it: inline config stringData.server using TPL
set:
global.giteaHostName: 'gitea.example.com'
ingress.enabled: true
ingress.hosts[0].host: '{{ .Values.global.giteaHostName }}'
ingress.tls:
- secretName: gitea-tls
hosts:
- '{{ .Values.global.giteaHostName }}'
asserts:
- documentIndex: 0
matchRegex:
path: metadata.name
pattern: .*-inline-config$
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: DOMAIN=gitea\.example\.com
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: ROOT_URL=https://gitea\.example\.com
- documentIndex: 0
matchRegex:
path: stringData.server
pattern: SSH_DOMAIN=gitea\.example\.com

View file

@ -1,155 +0,0 @@
# $schema: https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: route template
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/route.yaml
tests:
- it: hostname using TPL
set:
global.giteaHostName: 'gitea.example.com'
route.enabled: true
route.host: '{{ .Values.global.giteaHostName }}'
asserts:
- isKind:
of: Route
- equal:
path: spec.host
value: 'gitea.example.com'
- notExists:
path: spec.wildcardPolicy
- it: wildcard policy
set:
global.giteaHostName: 'gitea.example.com'
route.enabled: true
route.wildcardPolicy: 'Subdomain'
asserts:
- isKind:
of: Route
- equal:
path: spec.wildcardPolicy
value: 'Subdomain'
- it: existing certificate
set:
route.enabled: true
route.tls.existingSecret: certificate-secret
route.tls.certificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
route.tls.privateKey: |
-----BEGIN PRIVATE KEY-----
...
-----END PRIVATE KEY-----
route.tls.caCertificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
asserts:
- isKind:
of: Route
- equal:
path: spec.tls.externalCertificate
value: certificate-secret
- notExists:
path: spec.tls.certificate
- notExists:
path: spec.tls.key
- notExists:
path: spec.tls.caCertificate
- it: valid certificate values
set:
route.enabled: true
route.tls.certificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
route.tls.privateKey: |
-----BEGIN PRIVATE KEY-----
...
-----END PRIVATE KEY-----
route.tls.caCertificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
asserts:
- isKind:
of: Route
- notExists:
path: spec.tls.externalCertificate
- equal:
path: spec.tls.certificate
value: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
- equal:
path: spec.tls.key
value: |
-----BEGIN PRIVATE KEY-----
...
-----END PRIVATE KEY-----
- equal:
path: spec.tls.caCertificate
value: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
- it: missing certificate values
set:
route.enabled: true
route.tls.privateKey: |
-----BEGIN PRIVATE KEY-----
...
-----END PRIVATE KEY-----
route.tls.caCertificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
asserts:
- failedTemplate:
errorMessage: certificate, privateKey and caCertificate must be specified together
- it: missing privateKey values
set:
route.enabled: true
route.tls.certificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
route.tls.caCertificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
asserts:
- failedTemplate:
errorMessage: certificate, privateKey and caCertificate must be specified together
- it: missing caCertificate values
set:
route.enabled: true
route.tls.certificate: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
route.tls.privateKey: |
-----BEGIN PRIVATE KEY-----
...
-----END PRIVATE KEY-----
asserts:
- failedTemplate:
errorMessage: certificate, privateKey and caCertificate must be specified together
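# A hedged values sketch for the Route options covered above: either reference
# an existing TLS secret or provide certificate, privateKey and caCertificate
# together, as the failedTemplate assertions require. Host and secret name are
# placeholders.
#
# route:
#   enabled: true
#   host: git.example.com
#   tls:
#     termination: edge
#     insecureEdgeTerminationPolicy: Redirect
#     existingSecret: certificate-secret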

View file

@ -1,25 +0,0 @@
# $schema: https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: deployment template (security context)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: FS group set to 1000
template: templates/gitea/deployment.yaml
set:
image.rootless: false
asserts:
- equal:
path: spec.template.spec.securityContext.fsGroup
value: 1000
- it: run configure-gitea with UID 1000
template: templates/gitea/deployment.yaml
set:
image.rootless: false
asserts:
- equal:
path: spec.template.spec.initContainers[?(@.name == 'configure-gitea')].securityContext.runAsUser
value: 1000

View file

@ -1,25 +0,0 @@
# $schema: https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: deployment template (security context)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: FS group not set
template: templates/gitea/deployment.yaml
set:
image.rootless: false
global.compatibility.openshift.adaptSecurityContext: force
asserts:
- notExists:
path: spec.template.spec.securityContext.fsGroup
- it: configure-gitea without runAsUser
template: templates/gitea/deployment.yaml
set:
image.rootless: false
global.compatibility.openshift.adaptSecurityContext: force
asserts:
- notExists:
path: spec.template.spec.initContainers[?(@.name == 'configure-gitea')].securityContext.runAsUser
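# Values sketch matching the tests above: forcing the OpenShift compatibility
# mode drops fsGroup and runAsUser so the platform can assign arbitrary IDs.
#
# global:
#   compatibility:
#     openshift:
#       adaptSecurityContext: force
# image:
#   rootless: false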

View file

@ -1,64 +0,0 @@
suite: deployment template (SSH configuration)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: supports defining SSH log level for root based image
template: templates/gitea/deployment.yaml
set:
image.rootless: false
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: SSH_LOG_LEVEL
value: 'INFO'
- it: supports overriding SSH log level
template: templates/gitea/deployment.yaml
set:
image.rootless: false
gitea.ssh.logLevel: 'DEBUG'
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: SSH_LOG_LEVEL
value: 'DEBUG'
- it: supports overriding SSH log level (even when image.fullOverride set)
template: templates/gitea/deployment.yaml
set:
image.fullOverride: gitea/gitea:1.19.3
image.rootless: false
gitea.ssh.logLevel: 'DEBUG'
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: SSH_LOG_LEVEL
value: 'DEBUG'
- it: skips SSH_LOG_LEVEL for rootless image
template: templates/gitea/deployment.yaml
set:
image.rootless: true
gitea.ssh.logLevel: 'DEBUG' # explicitly defining a non-standard level here
asserts:
- notContains:
path: spec.template.spec.containers[0].env
any: true
content:
name: SSH_LOG_LEVEL
- it: skips SSH_LOG_LEVEL for rootless image (even when image.fullOverride set)
template: templates/gitea/deployment.yaml
set:
image.fullOverride: gitea/gitea:1.19.3
image.rootless: true
gitea.ssh.logLevel: 'DEBUG' # explicitly defining a non-standard level here
asserts:
- notContains:
path: spec.template.spec.containers[0].env
any: true
content:
name: SSH_LOG_LEVEL
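# Values sketch for the behaviour tested above: SSH_LOG_LEVEL is only injected
# for the root-based image and is skipped entirely for rootless images.
#
# image:
#   rootless: false
# gitea:
#   ssh:
#     logLevel: DEBUG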

View file

@ -1,39 +0,0 @@
# File: tests/gitea-storageclass-tests.yaml
suite: storage class configuration tests
release:
name: gitea-storageclass-tests
namespace: testing
templates:
- templates/gitea/pvc.yaml
tests:
- it: should set storageClassName when persistence.storageClass is defined
template: templates/gitea/pvc.yaml
set:
persistence.storageClass: 'my-storage-class'
asserts:
- equal:
path: 'spec.storageClassName'
value: 'my-storage-class'
- it: should set global.storageClass when persistence.storageClass is not defined
template: templates/gitea/pvc.yaml
set:
global.storageClass: 'default-storage-class'
asserts:
- equal:
path: spec.storageClassName
value: 'default-storage-class'
- it: should set storageClassName when persistence.storageClass is defined and global.storageClass is defined
template: templates/gitea/pvc.yaml
set:
global.storageClass: 'default-storage-class'
persistence.storageClass: 'my-storage-class'
asserts:
- equal:
path: spec.storageClassName
value: 'my-storage-class'
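# Values sketch for the precedence tested above: persistence.storageClass wins
# over global.storageClass when both are set. Class names are placeholders.
#
# global:
#   storageClass: default-storage-class
# persistence:
#   storageClass: my-storage-class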

View file

@ -1,128 +0,0 @@
suite: ssh-svc / http-svc template (Services configuration)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/ssh-svc.yaml
- templates/gitea/http-svc.yaml
tests:
- it: supports adding custom labels to ssh-svc
template: templates/gitea/ssh-svc.yaml
set:
service:
ssh:
labels:
gitea/testkey: testvalue
asserts:
- equal:
path: metadata.labels["gitea/testkey"]
value: 'testvalue'
- it: keeps existing labels (ssh)
template: templates/gitea/ssh-svc.yaml
set:
service:
ssh:
labels: {}
asserts:
- exists:
path: metadata.labels["app"]
- it: supports adding custom labels to http-svc
template: templates/gitea/http-svc.yaml
set:
service:
http:
labels:
gitea/testkey: testvalue
asserts:
- equal:
path: metadata.labels["gitea/testkey"]
value: 'testvalue'
- it: keeps existing labels (http)
template: templates/gitea/http-svc.yaml
set:
service:
http:
labels: {}
asserts:
- exists:
path: metadata.labels["app"]
- it: uses default ports to ssh-svc
template: templates/gitea/ssh-svc.yaml
asserts:
- equal:
path: spec.ports[0].port
value: 22
- equal:
path: spec.ports[0].targetPort
value: ssh
- it: render service.ssh.loadBalancerClass if set and type is LoadBalancer
template: templates/gitea/ssh-svc.yaml
set:
service:
ssh:
loadBalancerClass: 'example.com/class'
type: LoadBalancer
loadBalancerIP: '1.2.3.4'
loadBalancerSourceRanges:
- '1.2.3.4/32'
- '5.6.7.8/32'
asserts:
- equal:
path: spec.loadBalancerClass
value: 'example.com/class'
- equal:
path: spec.loadBalancerIP
value: '1.2.3.4'
- equal:
path: spec.loadBalancerSourceRanges
value: ['1.2.3.4/32', '5.6.7.8/32']
- it: does not render loadBalancer properties when type is not LoadBalancer
template: templates/gitea/http-svc.yaml
set:
service:
http:
type: ClusterIP
loadBalancerClass: 'example.com/class'
loadBalancerIP: '1.2.3.4'
loadBalancerSourceRanges:
- '1.2.3.4/32'
- '5.6.7.8/32'
asserts:
- notExists:
path: spec.loadBalancerClass
- notExists:
path: spec.loadBalancerIP
- notExists:
path: spec.loadBalancerSourceRanges
- it: does not render loadBalancerClass by default even when type is LoadBalancer
template: templates/gitea/http-svc.yaml
set:
service:
http:
type: LoadBalancer
loadBalancerIP: '1.2.3.4'
asserts:
- notExists:
path: spec.loadBalancerClass
- equal:
path: spec.loadBalancerIP
value: '1.2.3.4'
- it: both ssh and http services exist
templates:
- templates/gitea/ssh-svc.yaml
- templates/gitea/http-svc.yaml
asserts:
- matchRegex:
path: metadata.name
pattern: '^gitea-unittests-forgejo-(?:ssh|http)$'
- matchRegex:
path: spec.ports[0].name
pattern: '^(?:ssh|http)$'
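# Values sketch for the LoadBalancer-specific fields tested above; they are
# only rendered when the service type is LoadBalancer. Addresses and the class
# name are placeholders.
#
# service:
#   ssh:
#     type: LoadBalancer
#     loadBalancerClass: example.com/class
#     loadBalancerIP: 1.2.3.4
#     loadBalancerSourceRanges:
#       - 1.2.3.4/32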

View file

@ -11,12 +11,12 @@ tests:
enabled: true
asserts:
- failedTemplate:
errorMessage: Either specify `signing.privateKey` or `signing.existingSecret`
errorMessage: Either specify `signing.privateKey` or `signing.existingKey`
- it: skips rendering using external secret reference
set:
signing:
enabled: true
existingSecret: 'external-secret-reference'
existingSecret: "external-secret-reference"
asserts:
- hasDocuments:
count: 0
@ -24,7 +24,7 @@ tests:
set:
signing:
enabled: true
privateKey: 'gpg-key-placeholder'
privateKey: "gpg-key-placeholder"
asserts:
- hasDocuments:
count: 1
@ -33,8 +33,8 @@ tests:
kind: Secret
apiVersion: v1
name: forgejo-unittests-gpg-key
- isNotNullOrEmpty:
- isNotEmpty:
path: metadata.labels
- equal:
path: data.privateKey
value: 'Z3BnLWtleS1wbGFjZWhvbGRlcg=='
value: "Z3BnLWtleS1wbGFjZWhvbGRlcg=="

View file

@ -1,87 +0,0 @@
suite: Init template (rootless)
release:
name: forgejo-unittests
namespace: testing
templates:
- templates/gitea/init.yaml
tests:
- it: runs gpg in batch mode
set:
signing.enabled: true
signing.privateKey: |-
-----BEGIN PGP PRIVATE KEY BLOCK-----
{placeholder}
-----END PGP PRIVATE KEY BLOCK-----
asserts:
- equal:
path: stringData["configure_gpg_environment.sh"]
value: |-
#!/usr/bin/env bash
set -eu
gpg --batch --import /raw/private.asc
- it: skips gpg script block for disabled signing
asserts:
- equal:
path: stringData["init_directory_structure.sh"]
value: |-
#!/usr/bin/env bash
set -euo pipefail
set -x
mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure
mkdir -p "${GITEA_TEMP}"
chmod ug+rwx "${GITEA_TEMP}"
- it: adds gpg script block for enabled signing
set:
signing.enabled: true
signing.privateKey: |-
-----BEGIN PGP PRIVATE KEY BLOCK-----
{placeholder}
-----END PGP PRIVATE KEY BLOCK-----
asserts:
- equal:
path: stringData["init_directory_structure.sh"]
value: |-
#!/usr/bin/env bash
set -euo pipefail
set -x
mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure
mkdir -p "${GITEA_TEMP}"
chmod ug+rwx "${GITEA_TEMP}"
if [ ! -d "${GNUPGHOME}" ]; then
mkdir -p "${GNUPGHOME}"
chmod 700 "${GNUPGHOME}"
chown 1000:1000 "${GNUPGHOME}"
fi
- it: does not chown /data even when image.fullOverride is set
set:
image.fullOverride: gitea/gitea:1.20.5
asserts:
- equal:
path: stringData["init_directory_structure.sh"]
value: |-
#!/usr/bin/env bash
set -euo pipefail
set -x
mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure
mkdir -p "${GITEA_TEMP}"
chmod ug+rwx "${GITEA_TEMP}"

View file

@ -7,26 +7,19 @@ templates:
tests:
- it: runs gpg in batch mode
set:
image.rootless: false
signing.enabled: true
signing.privateKey: |-
-----BEGIN PGP PRIVATE KEY BLOCK-----
{placeholder}
-----END PGP PRIVATE KEY BLOCK-----
asserts:
- equal:
path: stringData["configure_gpg_environment.sh"]
path: stringData.[configure_gpg_environment.sh]
value: |-
#!/usr/bin/env bash
set -eu
gpg --batch --import /raw/private.asc
- it: skips gpg script block for disabled signing
set:
image.rootless: false
asserts:
- equal:
path: stringData["init_directory_structure.sh"]
path: stringData.[init_directory_structure.sh]
value: |-
#!/usr/bin/env bash
@ -44,15 +37,10 @@ tests:
chmod ug+rwx "${GITEA_TEMP}"
- it: adds gpg script block for enabled signing
set:
image.rootless: false
signing.enabled: true
signing.privateKey: |-
-----BEGIN PGP PRIVATE KEY BLOCK-----
{placeholder}
-----END PGP PRIVATE KEY BLOCK-----
asserts:
- equal:
path: stringData["init_directory_structure.sh"]
path: stringData.[init_directory_structure.sh]
value: |-
#!/usr/bin/env bash

View file

@ -1,19 +0,0 @@
suite: PVC template
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/pvc.yaml
tests:
- it: Storage Class using TPL
set:
global.persistence.storageClass: 'storage-class'
persistence.enabled: true
persistence.create: true
persistence.storageClass: '{{ .Values.global.persistence.storageClass }}'
asserts:
- isKind:
of: PersistentVolumeClaim
- equal:
path: spec.storageClassName
value: 'storage-class'

View file

@ -1,82 +0,0 @@
suite: ServiceAccount template (basic)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/serviceaccount.yaml
tests:
- it: skips rendering by default
asserts:
- hasDocuments:
count: 0
- it: renders default ServiceAccount object with serviceAccount.create=true
set:
serviceAccount.create: true
asserts:
- hasDocuments:
count: 1
- containsDocument:
kind: ServiceAccount
apiVersion: v1
name: gitea-unittests-forgejo
- equal:
path: automountServiceAccountToken
value: false
- notExists:
path: imagePullSecrets
- notExists:
path: metadata.annotations
- it: allows for adding custom labels
set:
serviceAccount:
create: true
labels:
custom: label
asserts:
- equal:
path: metadata.labels.custom
value: label
- it: allows for adding custom annotations
set:
serviceAccount:
create: true
annotations:
myCustom: annotation
asserts:
- equal:
path: metadata.annotations.myCustom
value: annotation
- it: allows to override the generated name
set:
serviceAccount:
create: true
name: provided-serviceaccount-name
asserts:
- equal:
path: metadata.name
value: provided-serviceaccount-name
- it: allows to mount the token
set:
serviceAccount:
create: true
automountServiceAccountToken: true
asserts:
- equal:
path: automountServiceAccountToken
value: true
- it: allows to reference image pull secrets
set:
serviceAccount:
create: true
imagePullSecrets:
- name: testing-image-pull-secret
- name: another-pull-secret
asserts:
- contains:
path: imagePullSecrets
content:
name: testing-image-pull-secret
- contains:
path: imagePullSecrets
content:
name: another-pull-secret
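# Values sketch for the ServiceAccount options exercised above; every field is
# optional and the values below are placeholders taken from the tests.
#
# serviceAccount:
#   create: true
#   name: provided-serviceaccount-name
#   automountServiceAccountToken: false
#   labels:
#     custom: label
#   annotations:
#     myCustom: annotation
#   imagePullSecrets:
#     - name: testing-image-pull-secret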

View file

@ -1,32 +0,0 @@
suite: ServiceAccount template (reference)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/serviceaccount.yaml
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: does not modify the deployment by default
template: templates/gitea/deployment.yaml
asserts:
- notExists:
path: spec.serviceAccountName
- it: adds the reference to the deployment with serviceAccount.create=true
template: templates/gitea/deployment.yaml
set:
serviceAccount.create: true
asserts:
- equal:
path: spec.template.spec.serviceAccountName
value: gitea-unittests-forgejo
- it: allows referencing an externally created ServiceAccount to the deployment
template: templates/gitea/deployment.yaml
set:
serviceAccount:
create: false # explicitly set to define rendering behavior
name: 'externally-existing-serviceaccount'
asserts:
- equal:
path: spec.template.spec.serviceAccountName
value: externally-existing-serviceaccount

View file

@ -0,0 +1,17 @@
suite: Statefulset template (basic)
release:
name: forgejo-unittests
namespace: testing
templates:
- templates/gitea/statefulset.yaml
- templates/gitea/config.yaml
tests:
- it: renders a statefulset
template: templates/gitea/statefulset.yaml
asserts:
- hasDocuments:
count: 1
- containsDocument:
kind: StatefulSet
apiVersion: apps/v1
name: forgejo-unittests

View file

@ -1,13 +1,13 @@
suite: deployment template (signing disabled)
suite: Statefulset template (signing disabled)
release:
name: forgejo-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/statefulset.yaml
- templates/gitea/config.yaml
tests:
- it: skips gpg init container
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
asserts:
- notContains:
path: spec.template.spec.initContainers
@ -15,24 +15,24 @@ tests:
content:
name: configure-gpg
- it: skips gpg env in `init-directories` init container
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
set:
signing.enabled: false
signing.enabled: true
asserts:
- notContains:
- contains:
path: spec.template.spec.initContainers[0].env
content:
name: GNUPGHOME
value: /data/git/.gnupg
- it: skips gpg env in runtime container
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
asserts:
- notContains:
path: spec.template.spec.containers[0].env
content:
name: GNUPGHOME
- it: skips gpg volume spec
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
asserts:
- notContains:
path: spec.template.spec.volumes

View file

@ -1,24 +1,24 @@
suite: deployment template (signing enabled)
suite: Statefulset template (signing enabled)
release:
name: forgejo-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/statefulset.yaml
- templates/gitea/config.yaml
tests:
- it: adds gpg init container
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
set:
signing:
enabled: true
existingSecret: 'custom-gpg-secret'
existingSecret: "custom-gpg-secret"
asserts:
- equal:
path: spec.template.spec.initContainers[2].name
value: configure-gpg
- equal:
path: spec.template.spec.initContainers[2].command
value: ['/usr/sbin/configure_gpg_environment.sh']
value: ["/usr/sbin/configure_gpg_environment.sh"]
- equal:
path: spec.template.spec.initContainers[2].securityContext
value:
@ -39,10 +39,9 @@ tests:
mountPath: /raw
readOnly: true
- it: adds gpg env in `init-directories` init container
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
set:
signing.enabled: true
signing.existingSecret: 'custom-gpg-secret'
asserts:
- contains:
path: spec.template.spec.initContainers[0].env
@ -50,10 +49,9 @@ tests:
name: GNUPGHOME
value: /data/git/.gnupg
- it: adds gpg env in runtime container
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
set:
signing.enabled: true
signing.existingSecret: 'custom-gpg-secret'
asserts:
- contains:
path: spec.template.spec.containers[0].env
@ -61,11 +59,10 @@ tests:
name: GNUPGHOME
value: /data/git/.gnupg
- it: adds gpg volume spec
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
set:
signing:
enabled: true
existingSecret: 'forgejo-unittests-gpg-key'
asserts:
- contains:
path: spec.template.spec.volumes
@ -78,7 +75,7 @@ tests:
path: private.asc
defaultMode: 0100
- it: supports gpg volume spec with external reference
template: templates/gitea/deployment.yaml
template: templates/gitea/statefulset.yaml
set:
signing:
enabled: true

View file

@ -1,14 +0,0 @@
suite: Values conflicting checks
release:
name: gitea-unittests
namespace: testing
tests:
- it: fails when trying to configure redis and redis-cluster the same time
set:
redis-cluster:
enabled: true
redis:
enabled: true
asserts:
- failedTemplate:
errorMessage: redis and redis-cluster cannot be enabled at the same time. Please only choose one.
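# Values sketch: enable at most one of the two Redis dependencies, otherwise
# rendering fails with the error asserted above.
#
# redis-cluster:
#   enabled: false
# redis:
#   enabled: true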

View file

@ -6,57 +6,34 @@
## @param global.imageRegistry global image registry override
## @param global.imagePullSecrets global image pull secrets override; can be extended by `imagePullSecrets`
## @param global.storageClass global storage class override
## @param global.hostAliases global hostAliases which will be added to the pod's hosts files
global:
imageRegistry: ''
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ''
hostAliases: []
# - ip: 192.168.137.2
# hostnames:
# - example.com
storageClass: ""
## @param namespaceOverride String to fully override common.names.namespace
##
namespaceOverride: ''
## @param replicaCount number of replicas for the deployment
## @param replicaCount number of replicas for the statefulset
replicaCount: 1
## @section strategy
## @param strategy.type strategy type
## @param strategy.rollingUpdate.maxSurge maxSurge
## @param strategy.rollingUpdate.maxUnavailable maxUnavailable
strategy:
type: 'RollingUpdate'
rollingUpdate:
maxSurge: '100%'
maxUnavailable: 0
## @param clusterDomain cluster domain
clusterDomain: cluster.local
## @section Image
## @param image.registry image registry, e.g. gcr.io,docker.io
## @param image.repository Image to start for this pod
## @param image.tag Visit: [Image tag](https://code.forgejo.org/forgejo/-/packages/container/forgejo/versions). Defaults to `appVersion` within Chart.yaml.
## @param image.digest Image digest. Allows to pin the given image tag. Useful for having control over mutable tags like `latest`
## @param image.tag Visit: [Image tag](https://codeberg.org/forgejo/-/packages/container/forgejo/versions). Defaults to `appVersion` within Chart.yaml.
## @param image.pullPolicy Image pull policy
## @param image.rootless Whether or not to pull the rootless version of Forgejo
## @param image.fullOverride Completely overrides the image registry, path/image, tag and digest. **Adjust `image.rootless` accordingly and review [Rootless defaults](#rootless-defaults).**
## @param image.rootless Whether or not to pull the rootless version of Forgejo, only works on Forgejo 1.14.x or higher
image:
registry: code.forgejo.org
registry: "codeberg.org"
repository: forgejo/forgejo
# Overrides the image tag whose default is the chart appVersion.
tag: ''
digest: ''
pullPolicy: IfNotPresent
rootless: true
fullOverride: ''
tag: ""
pullPolicy: Always
rootless: false # only possible when running 1.14 or later
## @param imagePullSecrets Secret to use for pulling the image
imagePullSecrets: []
@ -92,16 +69,11 @@ containerSecurityContext: {}
## @param securityContext Run init and Forgejo containers as a specific securityContext
securityContext: {}
## @param podDisruptionBudget Pod disruption budget
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
## @section Service
service:
## @param service.http.type Kubernetes service type for web traffic
## @param service.http.port Port number for web traffic
## @param service.http.clusterIP ClusterIP setting for http autosetup for deployment
## @param service.http.clusterIP ClusterIP setting for http autosetup for statefulset is None
## @param service.http.loadBalancerIP LoadBalancer IP setting
## @param service.http.nodePort NodePort for http service
## @param service.http.externalTrafficPolicy If `service.http.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
@ -110,12 +82,10 @@ service:
## @param service.http.ipFamilies HTTP service dual-stack family selection, for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
## @param service.http.loadBalancerSourceRanges Source range filter for http loadbalancer
## @param service.http.annotations HTTP service annotations
## @param service.http.labels HTTP service additional labels
## @param service.http.loadBalancerClass Loadbalancer class
http:
type: ClusterIP
port: 3000
clusterIP:
clusterIP: None
loadBalancerIP:
nodePort:
externalTrafficPolicy:
@ -124,11 +94,9 @@ service:
ipFamilies:
loadBalancerSourceRanges: []
annotations: {}
labels: {}
loadBalancerClass:
## @param service.ssh.type Kubernetes service type for ssh traffic
## @param service.ssh.port Port number for ssh traffic
## @param service.ssh.clusterIP ClusterIP setting for ssh autosetup for deployment
## @param service.ssh.clusterIP ClusterIP setting for ssh autosetup for statefulset is None
## @param service.ssh.loadBalancerIP LoadBalancer IP setting
## @param service.ssh.nodePort NodePort for ssh service
## @param service.ssh.externalTrafficPolicy If `service.ssh.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
@ -138,12 +106,10 @@ service:
## @param service.ssh.hostPort HostPort for ssh service
## @param service.ssh.loadBalancerSourceRanges Source range filter for ssh loadbalancer
## @param service.ssh.annotations SSH service annotations
## @param service.ssh.labels SSH service additional labels
## @param service.ssh.loadBalancerClass Loadbalancer class
ssh:
type: ClusterIP
port: 22
clusterIP:
clusterIP: None
loadBalancerIP:
nodePort:
externalTrafficPolicy:
@ -153,8 +119,7 @@ service:
hostPort:
loadBalancerSourceRanges: []
annotations: {}
labels: {}
loadBalancerClass:
## @section Ingress
## @param ingress.enabled Enable ingress
@ -164,12 +129,12 @@ service:
## @param ingress.hosts[0].paths[0].path Default Ingress path
## @param ingress.hosts[0].paths[0].pathType Ingress path type
## @param ingress.tls Ingress tls settings
## @extra ingress.apiVersion Specify APIVersion of ingress object. Mostly would only be used for argocd.
ingress:
enabled: false
# className: nginx
className:
annotations:
{}
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
@ -181,54 +146,14 @@ ingress:
# - secretName: chart-example-tls
# hosts:
# - git.example.com
# Mostly for argocd or any other CI that uses `helm template | kubectl apply` or similar
# If helm doesn't correctly detect your ingress API version you can set it here.
# apiVersion: networking.k8s.io/v1
## @section Route
## @param route.enabled Enable route
## @param route.annotations Route annotations
## @param route.host Host to use for the route (will be assigned automatically by OKD / OpenShift if not defined)
## @param route.wildcardPolicy Wildcard policy if any for the route, currently only 'Subdomain' or 'None' is allowed.
## @param route.tls.termination termination type (see [OKD documentation](https://docs.okd.io/latest/rest_api/network_apis/route-route-openshift-io-v1.html#spec-tls))
## @param route.tls.insecureEdgeTerminationPolicy the desired behavior for insecure connections to a route (e.g. with http)
## @param route.tls.existingSecret the name of a predefined secret of type kubernetes.io/tls with both key (tls.crt and tls.key) set accordingly (if defined attributes 'certificate', 'caCertificate' and 'privateKey' are ignored)
## @param route.tls.certificate PEM encoded single certificate
## @param route.tls.privateKey PEM encoded private key
## @param route.tls.caCertificate PEM encoded CA certificate or chain that issued the certificate
## @param route.tls.destinationCACertificate PEM encoded CA certificate used to verify the authenticity of the final endpoint when 'termination' is set to 'passthrough' (ignored otherwise)
route:
enabled: false
annotations: {}
host:
wildcardPolicy:
tls:
termination: edge
insecureEdgeTerminationPolicy: Redirect
existingSecret:
certificate:
# certificate: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
privateKey:
# privateKey: |-
# -----BEGIN PRIVATE KEY-----
# ...
# -----END PRIVATE KEY-----
caCertificate:
# caCertificate: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
destinationCACertificate:
# destinationCACertificate: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
## @section deployment
## @section StatefulSet
#
## @param resources Kubernetes resources
resources:
{}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@ -244,85 +169,54 @@ resources:
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
## @param schedulerName Use an alternate scheduler, e.g. "stork"
schedulerName: ''
schedulerName: ""
## @param nodeSelector NodeSelector for the deployment
## @param nodeSelector NodeSelector for the statefulset
nodeSelector: {}
## @param tolerations Tolerations for the deployment
## @param tolerations Tolerations for the statefulset
tolerations: []
## @param affinity Affinity for the deployment
## @param affinity Affinity for the statefulset
affinity: {}
## @param topologySpreadConstraints TopologySpreadConstraints for the deployment
topologySpreadConstraints: []
## @param dnsConfig dnsConfig for the deployment
## @param dnsConfig dnsConfig for the statefulset
dnsConfig: {}
## @param priorityClassName priorityClassName for the deployment
priorityClassName: ''
## @param deployment.env Additional environment variables to pass to containers
## @param deployment.terminationGracePeriodSeconds How long to wait until forcefully kill the pod
## @param deployment.labels Labels for the deployment
## @param deployment.annotations Annotations for the Forgejo deployment to be created
deployment:
env:
[]
## @param statefulset.env Additional environment variables to pass to containers
## @param statefulset.terminationGracePeriodSeconds How long to wait before forcefully killing the pod
## @param statefulset.labels Labels for the statefulset
## @param statefulset.annotations Annotations for the Forgejo StatefulSet to be created
statefulset:
env: []
# - name: VARIABLE
# value: my-value
terminationGracePeriodSeconds: 60
labels: {}
annotations: {}
## @section ServiceAccount
## @param serviceAccount.create Enable the creation of a ServiceAccount
## @param serviceAccount.name Name of the created ServiceAccount, defaults to release name. Can also link to an externally provided ServiceAccount that should be used.
## @param serviceAccount.automountServiceAccountToken Enable/disable auto mounting of the service account token
## @param serviceAccount.imagePullSecrets Image pull secrets, available to the ServiceAccount
## @param serviceAccount.annotations Custom annotations for the ServiceAccount
## @param serviceAccount.labels Custom labels for the ServiceAccount
serviceAccount:
create: false
name: ''
automountServiceAccountToken: false
imagePullSecrets: []
# - name: private-registry-access
annotations: {}
labels: {}
## @section Persistence
#
## @param persistence.enabled Enable persistent storage
## @param persistence.create Whether to create the persistentVolumeClaim for shared storage
## @param persistence.mount Whether the persistentVolumeClaim should be mounted (even if not created)
## @param persistence.claimName Use an existing claim to store repository information
## @param persistence.existingClaim Use an existing claim to store repository information
## @param persistence.size Size for persistence to store repo information
## @param persistence.accessModes AccessMode for persistence
## @param persistence.labels Labels for the persistence volume claim to be created
## @param persistence.annotations.helm.sh/resource-policy Resource policy for the persistence volume claim
## @param persistence.annotations Annotations for the persistence volume claim to be created
## @param persistence.storageClass Name of the storage class to use
## @param persistence.subPath Subdirectory of the volume to mount at
## @param persistence.volumeName Name of persistent volume in PVC
persistence:
enabled: true
create: true
mount: true
claimName: gitea-shared-storage
existingClaim:
size: 10Gi
accessModes:
- ReadWriteOnce
labels: {}
annotations: {}
storageClass:
subPath:
volumeName: ''
annotations:
helm.sh/resource-policy: keep
## @param extraVolumes Additional volumes to mount to the Forgejo deployment
## @param extraVolumes Additional volumes to mount to the Forgejo statefulset
extraVolumes: []
# - name: postgres-ssl-vol
# secret:
@ -346,7 +240,7 @@ extraVolumeMounts: []
## @section Init
## @param initPreScript Bash shell script copied verbatim to the start of the init-container.
initPreScript: ''
initPreScript: ""
#
# initPreScript: |
# mkdir -p /data/git/.postgresql
@ -354,32 +248,22 @@ initPreScript: ''
# chown -R git:git /data/git/.postgresql/
# chmod 400 /data/git/.postgresql/postgresql.key
## @param initContainers.resources.limits initContainers.limits Kubernetes resource limits for init containers
## @param initContainers.resources.requests.cpu initContainers.requests.cpu Kubernetes cpu resource limits for init containers
## @param initContainers.resources.requests.memory initContainers.requests.memory Kubernetes memory resource limits for init containers
initContainers:
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
# Configure commit/action signing prerequisites
## @section Signing
#
## @param signing.enabled Enable commit/action signing
## @param signing.gpgHome GPG home directory
## @param signing.privateKey Inline private GPG key for signed internal Git activity
## @param signing.privateKey Inline private gpg key for signed Forgejo actions
## @param signing.existingSecret Use an existing secret to store the value of `signing.privateKey`
signing:
enabled: false
gpgHome: /data/git/.gnupg
privateKey: ''
privateKey: ""
# privateKey: |-
# -----BEGIN PGP PRIVATE KEY BLOCK-----
# ...
# -----END PGP PRIVATE KEY BLOCK-----
existingSecret: ''
existingSecret: ""
## @section Gitea
#
@ -388,29 +272,24 @@ gitea:
## @param gitea.admin.existingSecret Use an existing secret to store admin user credentials
## @param gitea.admin.password Password for the Forgejo admin user
## @param gitea.admin.email Email for the Forgejo admin user
## @param gitea.admin.passwordMode Mode for how to set/update the admin user password. Options are: initialOnlyNoReset, initialOnlyRequireReset, and keepUpdated
admin:
#existingSecret: gitea-admin-secret
existingSecret:
username: gitea_admin
password: r8sA8CPHD9!bt6d
email: 'gitea@local.domain'
passwordMode: keepUpdated
email: "gitea@local.domain"
## @param gitea.metrics.enabled Enable Forgejo metrics
## @param gitea.metrics.serviceMonitor.enabled Enable Forgejo metrics service monitor
## @param gitea.metrics.serviceMonitor.namespace Namespace in which Prometheus is running
metrics:
enabled: false
serviceMonitor:
enabled: false
namespace: ''
# additionalLabels:
# prometheus-release: prom1
## @param gitea.ldap LDAP configuration
ldap:
[]
ldap: []
# - name: "LDAP 1"
# existingSecret:
# securityProtocol:
@ -427,8 +306,7 @@ gitea:
# Either specify inline `key` and `secret` or refer to them via `existingSecret`
## @param gitea.oauth OAuth configuration
oauth:
[]
oauth: []
# - name: 'OAuth 1'
# provider:
# key:
@ -441,6 +319,17 @@ gitea:
# customProfileUrl:
# customEmailUrl:
## @param gitea.config Configuration for the Forgejo server, ref: [config-cheat-sheet](https://docs.gitea.io/en-us/config-cheat-sheet/)
config: {}
# APP_NAME: "Forgejo: Git with a cup of tea"
# RUN_MODE: dev
#
# server:
# SSH_PORT: 22
#
# security:
# PASSWORD_COMPLEXITY: spec
## @param gitea.additionalConfigSources Additional configuration from secret or configmap
additionalConfigSources: []
# - secret:
@ -454,162 +343,6 @@ gitea:
## @param gitea.podAnnotations Annotations for the Forgejo pod
podAnnotations: {}
## @param gitea.ssh.logLevel Configure OpenSSH's log level. Only available for root-based Forgejo image.
ssh:
logLevel: 'INFO'
## @section `app.ini` overrides
## @descriptionStart
## Every value described in the [Cheat
## Sheet](https://forgejo.org/docs/latest/admin/config-cheat-sheet/) can be
## set as a Helm value. Configuration sections map to (lowercased) YAML
## blocks, while the keys themselves remain in all caps.
## @descriptionEnd
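# Illustrative example of the mapping described above (not a chart default):
#
#   gitea:
#     config:
#       server:
#         SSH_PORT: 22
#
# renders an app.ini along the lines of
#
#   [server]
#   SSH_PORT = 22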
config:
# values in the DEFAULT section
# (https://forgejo.org/docs/latest/admin/config-cheat-sheet/#overall-default)
# are un-namespaced
## @param gitea.config.APP_NAME Application name, used in the page title
APP_NAME: 'Forgejo: Beyond coding. We forge.'
## @param gitea.config.RUN_MODE Application run mode, affects performance and debugging: `dev` or `prod`
RUN_MODE: prod
## @param gitea.config.repository General repository settings
repository: {}
## @param gitea.config.cors Cross-origin resource sharing settings
cors: {}
## @param gitea.config.ui User interface settings
ui: {}
## @param gitea.config.markdown Markdown parser settings
markdown: {}
## @param gitea.config.server [object] General server settings
server:
SSH_PORT: 22 # rootful image
SSH_LISTEN_PORT: 2222 # rootless image
## @param gitea.config.database Database configuration (only necessary with an [externally managed DB](https://code.forgejo.org/forgejo-helm/forgejo-helm#external-database)).
database: {}
## @param gitea.config.indexer Settings for what content is indexed and how
indexer: {}
## @param gitea.config.queue Job queue configuration
queue: {}
## @param gitea.config.admin Admin user settings
admin: {}
## @param gitea.config.security Site security settings
security: {}
## @param gitea.config.camo Settings for the [camo](https://github.com/cactus/go-camo) media proxy server (disabled by default)
camo: {}
## @param gitea.config.openid Configuration for authentication with OpenID (disabled by default)
openid: {}
## @param gitea.config.oauth2_client OAuth2 client settings
oauth2_client: {}
## @param gitea.config.service Configuration for miscellaneous Forgejo services
service: {}
## @param gitea.config.ssh.minimum_key_sizes SSH minimum key sizes
ssh.minimum_key_sizes: {}
## @param gitea.config.webhook Webhook settings
webhook: {}
## @param gitea.config.mailer Mailer configuration (disabled by default)
mailer: {}
## @param gitea.config.email.incoming Configuration for handling incoming mail (disabled by default)
email.incoming: {}
## @param gitea.config.cache Cache configuration
cache: {}
## @param gitea.config.session Session/cookie handling
session: {}
## @param gitea.config.picture User avatar settings
picture: {}
## @param gitea.config.project Project board defaults
project: {}
## @param gitea.config.attachment Issue and PR attachment configuration
attachment: {}
## @param gitea.config.log Logging configuration
log: {}
## @param gitea.config.cron Cron job configuration
cron: {}
## @param gitea.config.git Global settings for Git
git: {}
## @param gitea.config.metrics Settings for the Prometheus endpoint (disabled by default)
metrics: {}
## @param gitea.config.api Settings for the Swagger API documentation endpoints
api: {}
## @param gitea.config.oauth2 Settings for the [OAuth2 provider](https://forgejo.org/docs/latest/admin/oauth2-provider/)
oauth2: {}
## @param gitea.config.i18n Internationalization settings
i18n: {}
## @param gitea.config.markup Configuration for advanced markup processors
markup: {}
## @param gitea.config.highlight.mapping File extension to language mapping overrides for syntax highlighting
highlight.mapping: {}
## @param gitea.config.time Locale settings
time: {}
## @param gitea.config.migrations Settings for Git repository migrations
migrations: {}
## @param gitea.config.federation Federation configuration
federation: {}
## @param gitea.config.packages Package registry settings
packages: {}
## @param gitea.config.mirror Configuration for repository mirroring
mirror: {}
## @param gitea.config.lfs Large File Storage configuration
lfs: {}
## @param gitea.config.repo-avatar Repository avatar storage configuration
repo-avatar: {}
## @param gitea.config.avatar User/org avatar storage configuration
avatar: {}
## @param gitea.config.storage General storage settings
storage: {}
## @param gitea.config.proxy Proxy configuration (disabled by default)
proxy: {}
## @param gitea.config.actions Configuration for [Forgejo Actions](https://forgejo.org/docs/latest/user/actions/)
actions: {}
## @param gitea.config.other Uncategorized configuration options
other: {}
## @section LivenessProbe
#
## @param gitea.livenessProbe.enabled Enable liveness probe
@ -633,8 +366,7 @@ gitea:
## @section ReadinessProbe
#
## @param gitea.readinessProbe.enabled Enable readiness probe
## @param gitea.readinessProbe.httpGet.path Path to probe for readiness
## @param gitea.readinessProbe.httpGet.port Port to probe for readiness
## @param gitea.readinessProbe.tcpSocket.port Port to probe for readiness
## @param gitea.readinessProbe.initialDelaySeconds Initial delay before readiness probe is initiated
## @param gitea.readinessProbe.timeoutSeconds Timeout for readiness probe
## @param gitea.readinessProbe.periodSeconds Period for readiness probe
@ -643,8 +375,7 @@ gitea:
# Modify the readiness probe for your needs or completely disable it by commenting out.
readinessProbe:
enabled: true
httpGet:
path: /api/healthz
tcpSocket:
port: http
initialDelaySeconds: 5
timeoutSeconds: 1
@ -672,121 +403,101 @@ gitea:
successThreshold: 1
failureThreshold: 10
## @section Redis&reg; Cluster
## @section Memcached
## @descriptionStart
## Redis&reg; Cluster is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) if enabled in the values.
## Full configuration options are available on their website.
## Redis cluster and [Redis](#redis) cannot be enabled at the same time.
## Memcached is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/memcached) if enabled in the values. Complete configuration options are available on their website.
## @descriptionEnd
#
## @param redis-cluster.enabled Enable redis cluster
## @param redis-cluster.usePassword Whether to use password authentication
## @param redis-cluster.cluster.nodes Number of redis cluster master nodes
## @param redis-cluster.cluster.replicas Number of redis cluster master node replicas
redis-cluster:
## @param memcached.enabled Enable Memcached
## @param memcached.service.ports.memcached Port for Memcached
memcached:
enabled: true
usePassword: false
cluster:
nodes: 3 # default: 6
replicas: 0 # default: 1
## @section Redis&reg;
## @descriptionStart
## Redis&reg; is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/redis) if enabled in the values.
## Full configuration options are available on their website.
## Redis and [Redis cluster](#redis-cluster) cannot be enabled at the same time.
## @descriptionEnd
#
## @param redis.enabled Enable redis standalone or replicated
## @param redis.architecture Whether to use standalone or replication
## @param redis.global.redis.password Required password
## @param redis.master.count Number of Redis master instances to deploy
redis:
enabled: false
architecture: standalone
global:
redis:
password: changeme
master:
count: 1
## @section PostgreSQL HA
## @descriptionStart
## PostgreSQL HA is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) if enabled in the values.
## Full configuration options are available on their website.
## @descriptionEnd
#
## @param postgresql-ha.enabled Enable PostgreSQL HA chart
## @param postgresql-ha.postgresql.password Password for the `gitea` user (overrides `auth.password`)
## @param postgresql-ha.global.postgresql.database Name for a custom database to create (overrides `auth.database`)
## @param postgresql-ha.global.postgresql.username Name for a custom user to create (overrides `auth.username`)
## @param postgresql-ha.global.postgresql.password Password for the custom user to create (overrides `auth.password`)
## @param postgresql-ha.postgresql.repmgrPassword Repmgr Password
## @param postgresql-ha.postgresql.postgresPassword postgres Password
## @param postgresql-ha.pgpool.adminPassword pgpool adminPassword
## @param postgresql-ha.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
## @param postgresql-ha.primary.persistence.size PVC Storage Request for PostgreSQL HA volume
postgresql-ha:
global:
postgresql:
database: gitea
password: gitea
username: gitea
enabled: true
postgresql:
repmgrPassword: changeme2
postgresPassword: changeme1
password: changeme4
pgpool:
adminPassword: changeme3
service:
ports:
postgresql: 5432
primary:
persistence:
size: 10Gi
memcached: 11211
## @section PostgreSQL
## @descriptionStart
## PostgreSQL is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) if enabled in the values.
## Full configuration options are available on their website.
## PostgreSQL is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) if enabled in the values. Complete configuration options are available on their website.
## @descriptionEnd
#
## @param postgresql.enabled Enable PostgreSQL
## @param postgresql.global.postgresql.auth.password Password for the `gitea` user (overrides `auth.password`)
## @param postgresql.global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
## @param postgresql.global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
## @param postgresql.global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
## @param postgresql.auth.database PostgreSQL database
## @param postgresql.auth.username PostgreSQL username
## @param postgresql.auth.password PostgreSQL password
## @param postgresql.auth.postgresPassword PostgreSQL admin password
## @param postgresql.primary.service.ports.postgresql Port to connect to PostgreSQL service
## @param postgresql.primary.persistence.size PVC Storage Request for PostgreSQL volume
postgresql:
enabled: false
global:
postgresql:
enabled: true
auth:
password: gitea
database: gitea
username: gitea
password: gitea
postgresPassword: gitea
primary:
service:
ports:
postgresql: 5432
persistence:
size: 10Gi
## @section MySQL
## @descriptionStart
## MySQL is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/mysql) if enabled in the values. Complete configuration options are available on their website.
## @descriptionEnd
#
#
## @param mysql.enabled Enable MySQL
## @param mysql.auth.database Name for new database to create.
## @param mysql.auth.username Username of new user to create.
## @param mysql.auth.password Password for the new user. Ignored if existing secret is provided
## @param mysql.auth.rootPassword Password for the root user. Ignored if existing secret is provided
## @param mysql.primary.service.ports.mysql Port to connect to MySQL service
## @param mysql.primary.persistence.size PVC Storage Request for MySQL volume
mysql:
enabled: false
auth:
database: gitea
username: gitea
password: gitea
rootPassword: gitea
primary:
service:
ports:
mysql: 3306
persistence:
size: 10Gi
## @section MariaDB
## @descriptionStart
## MariaDB is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/mariadb) if enabled in the values. Complete configuration options are available on their website.
## @descriptionEnd
#
#
## @param mariadb.enabled Enable MariaDB
## @param mariadb.auth.database Name of the database to create.
## @param mariadb.auth.username Username of the new user to create.
## @param mariadb.auth.password Password for the new user. Ignored if existing secret is provided
## @param mariadb.auth.rootPassword Password for the root user.
## @param mariadb.primary.service.ports.mysql Port to connect to MariaDB service
## @param mariadb.primary.persistence.size Persistence size for MariaDB
mariadb:
enabled: false
auth:
database: gitea
username: gitea
password: gitea
rootPassword: gitea
primary:
service:
ports:
mysql: 3306
persistence:
size: 10Gi
# By default, removed or moved settings that still remain in a user defined values.yaml will cause Helm to fail running the install/update.
# Set it to false to skip this basic validation check.
## @section Advanced
## @param checkDeprecation Whether to run this basic validation check.
## @param test.enabled Whether to use test-connection Pod.
## @param test.image.name Image name for the wget container used in the test-connection Pod.
## @param test.image.tag Image tag for the wget container used in the test-connection Pod.
## @param checkDeprecation Set it to false to skip this basic validation check.
checkDeprecation: true
test:
enabled: true
image:
name: busybox
tag: latest
## @param extraDeploy Array of extra objects to deploy with the release.
##
extraDeploy: []