Compare commits

..

No commits in common. "development" and "development" have entirely different histories.

315 changed files with 147 additions and 15856 deletions

1
.gitignore vendored
View file

@ -1 +0,0 @@
node_modules

19
.vscode/settings.json vendored
View file

@ -1,3 +1,22 @@
{
"workbench.colorCustomizations": {
"activityBar.activeBackground": "#ab307e",
"activityBar.background": "#ab307e",
"activityBar.foreground": "#e7e7e7",
"activityBar.inactiveForeground": "#e7e7e799",
"activityBarBadge.background": "#25320e",
"activityBarBadge.foreground": "#e7e7e7",
"commandCenter.border": "#e7e7e799",
"sash.hoverBorder": "#ab307e",
"statusBar.background": "#832561",
"statusBar.foreground": "#e7e7e7",
"statusBarItem.hoverBackground": "#ab307e",
"statusBarItem.remoteBackground": "#832561",
"statusBarItem.remoteForeground": "#e7e7e7",
"titleBar.activeBackground": "#832561",
"titleBar.activeForeground": "#e7e7e7",
"titleBar.inactiveBackground": "#83256199",
"titleBar.inactiveForeground": "#e7e7e799"
},
"peacock.color": "#832561"
}

View file

@ -1,17 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.14.0/.schema/devbox.schema.json",
"packages": [
"nodejs@latest",
"terraform@latest"
],
"shell": {
"init_hook": [
"echo 'Welcome to devbox!' > /dev/null"
],
"scripts": {
"test": [
"echo \"Error: no test specified\" && exit 1"
]
}
}
}

View file

@ -1,121 +0,0 @@
{
"lockfile_version": "1",
"packages": {
"github:NixOS/nixpkgs/nixpkgs-unstable": {
"resolved": "github:NixOS/nixpkgs/2d9e4457f8e83120c9fdf6f1707ed0bc603e5ac9?lastModified=1741462378&narHash=sha256-ZF3YOjq%2BvTcH51S%2BqWa1oGA9FgmdJ67nTNPG2OIlXDc%3D"
},
"nodejs@latest": {
"last_modified": "2025-03-16T16:17:41Z",
"plugin_version": "0.0.2",
"resolved": "github:NixOS/nixpkgs/8f76cf16b17c51ae0cc8e55488069593f6dab645#nodejs_23",
"source": "devbox-search",
"version": "23.10.0",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/dihlffh62qmgzsrxq1igwxicdyr3fn8a-nodejs-23.10.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/ks94i4365833bykrzg3d3mqxnciygyrn-nodejs-23.10.0-libv8"
}
],
"store_path": "/nix/store/dihlffh62qmgzsrxq1igwxicdyr3fn8a-nodejs-23.10.0"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/m7j1lf8a4z5bfla1m78pa3y12888hl7b-nodejs-23.10.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/kfvlfxx83n2w2fyb8hiz4p4dc165r035-nodejs-23.10.0-libv8"
}
],
"store_path": "/nix/store/m7j1lf8a4z5bfla1m78pa3y12888hl7b-nodejs-23.10.0"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/nj0d1lc4nanqj7v4ibcgd26m3p5yfb0h-nodejs-23.10.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/k5rvmvqyibamfxa7cfzjfd5ldmi38kf3-nodejs-23.10.0-libv8"
}
],
"store_path": "/nix/store/nj0d1lc4nanqj7v4ibcgd26m3p5yfb0h-nodejs-23.10.0"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/m7imcmwi4hschl257dzc33gxciqlf4bm-nodejs-23.10.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/wy7ysxmd2ygdc5zpbhf9ripwgvvvnwsd-nodejs-23.10.0-libv8"
}
],
"store_path": "/nix/store/m7imcmwi4hschl257dzc33gxciqlf4bm-nodejs-23.10.0"
}
}
},
"terraform@latest": {
"last_modified": "2025-04-10T20:20:34Z",
"resolved": "github:NixOS/nixpkgs/d19cf9dfc633816a437204555afeb9e722386b76#terraform",
"source": "devbox-search",
"version": "1.11.4",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/46l1vs4h00h1y3n3xxwzab0a16mawfcs-terraform-1.11.4",
"default": true
}
],
"store_path": "/nix/store/46l1vs4h00h1y3n3xxwzab0a16mawfcs-terraform-1.11.4"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/knyqig364fi94f3z33q47jawv9b4h4sy-terraform-1.11.4",
"default": true
}
],
"store_path": "/nix/store/knyqig364fi94f3z33q47jawv9b4h4sy-terraform-1.11.4"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/9w7xlspipmx4kal4bagqnf76h0wv8lx8-terraform-1.11.4",
"default": true
}
],
"store_path": "/nix/store/9w7xlspipmx4kal4bagqnf76h0wv8lx8-terraform-1.11.4"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/xlg2aqgy2fwilpfnla4313f39vs0hhmb-terraform-1.11.4",
"default": true
}
],
"store_path": "/nix/store/xlg2aqgy2fwilpfnla4313f39vs0hhmb-terraform-1.11.4"
}
}
}
}
}

View file

@ -1,6 +0,0 @@
title: Technical doc
arrange:
- concepts
- architecture
- product
- project

View file

@ -1,62 +0,0 @@
---
status: "proposed"
decision-makers: {list everyone involved in the decision}
---
<!-- markdownlint-disable-next-line MD025 -->
# Replace Keycloak with OpenBao as OIDC Provider
## Context and Problem Statement
The EDP currently relies on Keycloak as the OpenID Connect (OIDC) provider to handle authentication and authorization. However, Keycloak is fairly complex and has quite some maintenance overhead, which leads to increased operational effort. We need to determine if replacing Keycloak with OpenBao, a tool we already use for secrets management and which may support OIDC capabilities, can streamline our architecture and reduce these operational burdens.
## Decision Drivers
- Simplify architecture by reducing the number of tools in our ecosystem.
- Reduce operational complexity and maintenance overhead to improve team efficiency.
- Ensure seamless integration with existing systems, particularly leveraging our current use of OpenBao.
- Maintain or enhance security and performance to meet platform requirements.
## Considered Options
- Keep using Keycloak
- Replace Keycloak with OpenBao
## Decision Outcome
Chosen option: "Replace Keycloak with OpenBao", because it enables us to consolidate identity and secrets management into a single tool, reducing operational complexity, improving integration with our existing infrastructure, and potentially enhancing performance, provided OpenBao can meet our OIDC needs.
### Consequences
- *Good*, because it simplifies the architecture by reducing the number of tools we need to manage.
- *Good*, because it may lower operational costs by eliminating a separate system and leveraging an existing open-source tool.
- *Bad*, because additional configuration or development might be required to ensure OpenBao fully supports all necessary OIDC features.
- *Bad*, because relying on a single tool for both identity and secrets management increases risk if OpenBao encounters issues.
### Confirmation
- Conduct a proof-of-concept to validate that OpenBao can effectively serve as an OIDC provider meeting our platform's requirements.
- Validate that all EDP components support the Authorization Code Flow
- Review the design and implementation with the development team to confirm alignment with this decision.
## Pros and Cons of the Options
### Keep using Keycloak
Keycloak is a mature, feature-rich OIDC provider widely used for authentication and authorization.
- *Good*, because it offers extensive OIDC features, including support for single sign-on and various authentication protocols.
- *Good*, because it is already integrated into our platform, minimizing immediate changes.
- *Bad*, because its complexity increases configuration and maintenance efforts.
- *Bad*, because maintaining it as a separate tool adds to operational overhead.
### Replace Keycloak with OpenBao
OpenBao, a fork of HashiCorp Vault, is currently used for secrets management and may be configurable as an OIDC provider.
- *Good*, because consolidating identity and secrets management into one tool simplifies our architecture.
- *Good*, because it leverages our existing OpenBao deployment, potentially improving integration and reducing costs.
- *Bad*, because OpenBao may not natively support all advanced OIDC features provided by Keycloak, such as comprehensive user management.
- *Bad*, because its community and documentation for OIDC use cases may be less robust compared to Keycloak.
## More Information
Before finalizing this decision, we must verify OpenBao's OIDC capabilities against our specific authentication and authorization requirements, such as user federation and token issuance for our development platform. The team should also assess the long-term implications of relying heavily on OpenBao and consider revisiting this decision if significant gaps in OIDC functionality are identified during the proof-of-concept phase.

File diff suppressed because one or more lines are too long

View file

@ -1,6 +0,0 @@
---
title: Design
weight: 1
description: Edge Developer Framework Design
---

View file

@ -1,27 +0,0 @@
---
title: Architecture
weight: 1
description: High level EDP Architecture
---
## Architecture
> This architecture chart was discussed in the Berlin arch workshop Jan. 21st 2025
![alt text](architecture-IMG_20250121_163020.jpg)
## Additional notes
With respect to the meaning of 'Platform as a product' there are following notes to EDP as a product:
* Product items are
1. EDP Foundry
a) opinionated extreme (aka 'locked down version')
b) 'construction set' ('baukasten') extreme
c) provide documentation
2. all EDPs themselves
* 'product' in terms of a customer view means two aspects:
* they get it provisioned, i.e. we do the bootstrapping
* they have it in a maintained state, i.e. we do the maintaining (compare to github: there is no github version to the customer!)

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 237 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 327 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 92 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 59 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 78 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 226 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 406 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 353 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 464 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 430 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 458 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 484 KiB

View file

@ -1,59 +0,0 @@
# MVP1-12-OTC Milestone
History:
* 24.04.25: Initial Arch done in lucid by Waldemar
* 06.05.25: Ported to C4 by Stephan
* 13.05.25: Review OTC-EDP
https://lucid.app/lucidspark/7134abcb-0c5e-41e5-93da-7118e285a668/edit?invitationId=inv_fbf0d105-7023-4740-806e-fcbb522ea61a&page=0_0#
## C4 Architecture
* https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/edp-doc/src/branch/development/likec4/views/deployment/otc/otc.c4
![alt text](./_assets/image.png)
![alt text](./_assets/image-1.png)
## Overall
### Architecture
![alt text](./_assets/EDPForgejoArch1.svg)
### Work packages
![alt text](./_assets/EDPForgejoArch2.svg)
## EDP Foundry
### Sub-Components 'Internal Services' and 'Central Observability'
![alt text](./_assets/EDPForgejoArch3.svg)
## EDP (deployed per tenant)
### Sub-Components 'Kubernetes' and 'Cloud Services'
![alt text](./_assets/EDPForgejoArch4.svg)
## Operator (Provisioning Pipeline)
### Sub-Components 'Forgejo-Actions' and 'ArgoCD'
![alt text](./_assets/EDPForgejoArch5.svg)
![alt text](./_assets/EDPForgejoArch6.svg)
# OTC EDP product review on 13.05.25
![alt text](./_assets/edp-otc-1.png)
![alt text](./_assets/edp-otc-2.png)
![alt text](./_assets/edp-otc-3.png)
![alt text](./_assets/edp-otc-4.png)
![alt text](./_assets/edp-otc-5.png)

View file

@ -1,60 +0,0 @@
### WiP - needs to be reworked
Participants: Michael, Robert, Stephan
Date: Thu, 05.06.2025
# EDP Product Line 'Portal/Frontend/Forgejo'
[Also refer to other page here](../../../product/components/forgejo/index.md)
## how to proceed
* let's create tickets!
* let's mob!
## terms
Frontend: The part of the platform the user accesses. These are CLIs, APIs and Portals. Forgejo is our Portal.
### Agreement
* Forgejo=Portal (of the platform)
* Forgejo has a 'Frontend' - this is the touchpoint for users (like HTML-browsers)
* Forgejo has a backend, which delivers the content to be presented in the frontend
## issues
* Michael into ArchMeeting?! (agreement!)
* Robert/Stephan: discussion about our mindset - mobbing as avoiding missing detailed planning, in two streams
* Michael - Michal, Felix sind das 'Portal-Frontend'
* Tickets in Jira currently
* TODO creating tickets in forgejo / porting tickets from Jira
* we create the new forgejo-project 'Portal-Frontend'
* zweiter Meilenstein: Backend activities need to be addressed in the future
* TODO have a look into the old backstage-portal
* TODO Stephan and Julius ... Logo/CICD: We should define Logo/name tenant name, product name, url schemas
### Development lifecycle
* create project in CFor
* create tasks in CFor
* TODO Stephan possible tasks
* Environments -> let's provide a 'productline-forgejo' env in OTC!
* Pipelines
* check all tests!
### Architecture
* Goals:
* Stay Open Source compatible!
* stay forgejo-downstreaming compatible!
* provide ADRs before breaking changes
#### forgejo-Architecture
* analyse, present and discuss forgejo architecture
* Notice [Forgejo-Design guideline](https://codeberg.org/forgejo/design/)
* Notice/You must read [Developer Doc](https://forgejo.org/docs/next/contributor/)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.5 MiB

View file

@ -1,735 +0,0 @@
# OSS licenses and their impact on Forgejo development
Forgejo is GPL copylefted [since v9 from October 2024](https://forgejo.org/2024-10-release-v9-0/). The entry to the [discussion is here (blog)](https://forgejo.org/2024-08-monthly-update/#forgejo-is-now-copyleft) and [here (ticket)](https://codeberg.org/forgejo/discussions/issues/192)
The following is the documentation of a chat with GPT about OSS licensing in general and the impact of Forgejo's copyleft on our Forgejo development.
## TL;DR
* It looks like that as a SaaS/PaaS product EDP will not be affected to be 'infected' by GPL
* But if we share/distribute the code, all 'integrated' parts (like frontend code) needs to be published under GPL also.
### Forgejo release line
The following chart shows the Forgejo releases (and semantic versioning) over time and their relation - soft- vs hardfork - to Gitea.
![alt text](forgejo-releases.jpg)
## Copyleft
> was ist copyleft software?
**Copyleft** ist ein Begriff aus dem Bereich der **freien Softwarelizenzen**. Er bezeichnet eine spezielle Lizenzform, bei der **die Freiheit, Software zu nutzen, zu verändern und weiterzugeben, sichergestellt und auf alle abgeleiteten Werke übertragen wird**.
### Einfach erklärt:
Wenn Software unter einer **Copyleft-Lizenz** steht:
* Darfst du sie **frei verwenden**, **verändern** und **weiterverbreiten**.
* **Aber:** Wenn du sie veränderst oder in eigene Programme einbaust und diese weiterverbreitest, **musst du deinen Code auch wieder unter derselben Lizenz veröffentlichen**.
### Ziel von Copyleft:
Copyleft verhindert, dass jemand freie Software nimmt, verbessert und **dann proprietär macht** (also als unfreie Software weiterverkauft). Es sorgt dafür, dass **Freiheit erhalten bleibt** auch bei Änderungen und Weiterentwicklungen.
---
### Beispiele für Copyleft-Lizenzen:
* **GNU General Public License (GPL)** stärkstes Copyleft
* **AGPL (Affero GPL)** wie GPL, aber auch bei Webdiensten wirksam
* **LGPL (Lesser GPL)** abgeschwächt, erlaubt Linken mit proprietärem Code
---
### Abgrenzung:
* **Copyleft**: Freiheit bleibt ansteckend (GPL).
* **Permissive Lizenzen** (wie MIT oder Apache): Du darfst den Code auch in proprietäre Software einbauen, ohne ihn wieder offenlegen zu müssen.
---
### Kurzes Beispiel:
Du nutzt eine GPL-lizenzierte Bibliothek in deinem Projekt.
→ Du verteilst dein Programm weiter.
→ Du **musst deinen Code offenlegen** und unter GPL stellen.
→ Du **darfst nicht** dein Programm schließen und sagen: „Ist jetzt meins.“
---
## Integration of proprietary own code
> ich arbeite in einer company. wir verwenden forgejo als ausgangsprodukt für unser eigenes produkt P. wir wollen forgejo zu einem portal ausbauen. forgejo hat seit v9 eine copyleft lizenz, es ist GPL 3 und höher. wenn jetzt meine company sagt, wir müssen in P die company-eigenen frontend komponenten (javascript und css) K einbauen, dann würde das doch bedeuten, dass bei der veröffentlichung von P alle K mit ausgeliefert werden müssen. Sind sie dann auch copyleft?
Das ist eine sehr gute und wichtige Frage. Du hast die Ausgangslage korrekt beschrieben:
* **Forgejo steht seit Version 9 unter der GPLv3 (Copyleft)**.
* Eure Firma baut darauf ein neues Produkt **P** auf.
* In **P** sollen proprietäre **Frontend-Komponenten K (JS + CSS)** integriert werden.
* Jetzt ist die Frage: **Werden K dadurch automatisch copyleft (GPL-3-lizenziert)?**
---
### Die juristisch relevante Frage ist:
> **Ist K als „abgeleitetes Werk“ (derivative work) von Forgejo zu betrachten?**
Wenn ja → **GPL-Virus** greift → K müsste **ebenfalls unter der GPL veröffentlicht werden**, sobald P verteilt wird.
Wenn nein → K kann **proprietär** bleiben.
---
### Einschätzung aus der Praxis:
#### 🔹 Forgejo (bzw. Gitea/Gogs) ist eine Webanwendung:
* Typisch: Backend (Go) + Web-Frontend (JS/HTML/CSS).
* GPLv3 umfasst das **gesamte Programm**, auch das Web-Frontend.
#### 🔹 Wenn ihr Frontend-Komponenten K in Forgejo-P einbettet:
* Also z.B. in `templates/*.tmpl` einbindet
* Oder sie mit Forgejos JS interagieren (über globale Objekte, Hooks, Module usw.)
* Oder sie direkt im Forgejo-Build-Prozess enthalten sind
→ Dann sind **K Teil des Gesamtwerks****GPL-pflichtig**, wenn ihr **P verteilt** (z.B. als installierbare Software oder über ein Download-Angebot).
#### 🔹 Wenn ihr P **nur als internen Service** betreibt (also nicht verteilt):
* Dann **greift die GPL nicht**. Die GPL regelt **Verbreitung**, nicht bloße Nutzung (auch intern oder SaaS).
* Ausnahme: Bei AGPL (Affero GPL) wäre das anders. Forgejo nutzt aber **nur GPL**, nicht AGPL.
---
### Fazit:
| Szenario | Müssen K offen gelegt werden? |
| --------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
| **P wird nicht verteilt, nur gehostet (z.B. SaaS, internes Portal)** | ❌ Nein, GPL greift hier nicht |
| **P wird an Dritte verteilt (z.B. Kunden, Partner, zum Download)** | ✅ Ja, dann müssen **auch K offen gelegt werden**, wenn sie Teil des Gesamtsystems sind |
---
### Mögliche Strategien:
1. **Frontend-K modularisieren:**
* K **außerhalb** von Forgejo laden (z.B. über CDN, iframe, Microfrontend, WebExtension)
* Dann wäre K kein „abgeleitetes Werk“, sondern „separat“ → GPL greift nicht
2. **Forgejo entkoppeln:**
* Forgejo als **Backend-Komponente** nutzen, euer Portal ist UI-first
* Forgejo läuft z.B. headless oder als API-only-Service
3. **Lizenzpflicht akzeptieren:**
* K offenlegen und unter GPLv3 (oder kompatibler Lizenz) stellen
4. **Lizenzrechtlich beraten lassen:**
* Wenn euer Setup komplizierter ist (z.B. komplexes Zusammenspiel, modulare Bundles), ist eine rechtliche Einschätzung sinnvoll
---
## Headless, separation of forgejo Frontend and Backend
> angenommen, Forgejo würde sich zu einem headless-Ansatz entwickeln und Frontend und Backend trennen. Und wenn unsere virulenten K Komponenten dann nur im Frontend wären, würde ja dieselbe Diskussion gültig sein, diesmal halt nur mit dem Gesamtwerk 'Frontend'. Richtig?
Ganz genau **du hast das Prinzip vollständig verstanden**. Lass uns das nochmal im Detail klarmachen:
---
### 🧠 Grundidee: GPL „infiziert“ das **Gesamtwerk** bei *Verbreitung*
* Ob das „Gesamtwerk“ aus einem **monolithischen Projekt** besteht oder aus **Frontend + Backend getrennt** aufgebaut ist, ist **lizenzrechtlich egal**, solange die Teile als **ein zusammengehöriges Werk** vertrieben werden.
---
### 🔍 Headless Forgejo: Was ändert sich?
Angenommen Forgejo trennt sich vollständig in:
* **Forgejo Backend** (API, Auth, Git, etc.) → GPLv3
* **Forgejo Frontend** (UI, Web-App, REST-Client) → ebenfalls GPLv3
Nun baut ihr euer eigenes Frontend:
* Ihr verwendet Forgejos GPLv3-Frontend als Grundlage
* Ihr integriert eigene proprietäre Komponenten **K** (JS, CSS, React, etc.)
Dann gilt:
> Wenn **euer Frontend P'** auf dem GPLv3-Frontend von Forgejo basiert (also *abgeleitet ist*) und **weitergegeben wird**, dann **muss auch P' unter der GPLv3 stehen**.
→ Und damit **auch K**, wenn sie integraler Bestandteil sind.
---
### 🤔 Was ist „integraler Bestandteil“?
Wenn K z.B.:
* direkt in Forgejo-Frontend eingebunden wird (als Module, React-Components, Styles)
* im Build mit Forgejo-Code gebundled wird (z.B. über Webpack, Vite, Rollup)
* von GPL-Code aufgerufen oder mit GPL-Code gemeinsam ausgeliefert wird
→ Dann ist K **Teil des Frontend-Gesamtwerks** → **GPL-pflichtig**
---
### 🔓 Wie kann man K trotzdem proprietär halten?
Du musst K **technisch und lizenzrechtlich abtrennen**. Das geht z.B. durch:
#### 1. **Frontend-Plugins über offene Schnittstellen (APIs, Hooks)**
* Forgejo lädt externe Komponenten **dynamisch** zur Laufzeit (wie Plugins)
* Plugins liegen **nicht im selben Repository**, werden **nicht mitgebundled**
* Sie kommunizieren z.B. über Events oder JS-APIs
→ Diese Trennung kann stark genug sein, um K **nicht als abgeleitetes Werk** gelten zu lassen.
#### 2. **Microfrontend-Architektur**
* Forgejo-UI + K-Frontend als **völlig getrennte Webapps**
* Sie kommunizieren nur via REST
* Beide haben eigene Buildprozesse, Domains oder Deployments
→ Dann ist K **nur ein separater Client**, der Forgejo nutzt → **kein Copyleft-Risiko**
---
### ⚖️ Kurzes Lizenz-Merkblatt (bei Verteilung):
| Du verwendest... | Und veränderst... | Und verbreitest... | Dann musst du... |
| ---------------- | ----------------- | ------------------------ | ----------------------------------------------- |
| GPL-Frontend | nur API-Aufrufe | Nein | Nichts (Nutzung ist frei) |
| GPL-Frontend | nichts | Ja (z.B. als Proxy-App) | GPL-Code beilegen |
| GPL-Frontend | und fügst K ein | Ja | Dann wird K auch GPL (wenn nicht klar getrennt) |
---
**Fazit:**
Ja, auch bei Headless-Struktur bleibt die Diskussion **identisch**, nur auf **Frontend-Ebene**.
Was zählt, ist **Verbreitung** und **Abhängigkeit**: Wenn ihr Forgejos GPL-Code als **technisches Fundament** für euer eigenes UI nehmt und es **weitergebt**, dann **unterliegt das gesamte resultierende UI dem Copyleft** inklusive eurer Komponenten **K**, falls sie technisch integriert sind.
## Portal as SaaS/PaaS
> ok, und wenn unser P nur als SaaS bzw. PaaS verfügbar wird für Kunden, dann erübrigt sich die ganze Lizenzdiskussion?
Fast aber mit einer wichtigen Einschränkung:
---
### ✅ Ja: Wenn ihr **P nur als SaaS oder PaaS betreibt**, also **nicht verteilt**,
dann **greift die GPLv3 nicht**.
> **Die GPL regelt die *Verbreitung* (distribution)** von Software, **nicht deren Nutzung über ein Netzwerk**.
Das heißt:
* Ihr könnt **Forgejo (GPLv3)** hosten
* Ihr könnt es verändern
* Ihr könnt eigene Komponenten **K (JS/CSS)** integrieren, auch tief im System
* Solange ihr **P nicht als Download oder Produkt weitergebt**,
**müsst ihr keinerlei Quellcode veröffentlichen** auch nicht für K
---
### 🔒 Ausnahme: Die **AGPLv3**
Die **Affero GPLv3** (nicht die normale GPLv3!) **schließt diese Lücke**:
> Wenn Software **über ein Netzwerk bereitgestellt wird**, dann gilt das wie eine „Verbreitung“,
> und ihr müsst den **Quellcode bereitstellen**, inklusive Modifikationen.
Aber:
**Forgejo verwendet nur GPLv3**, **nicht** AGPLv3.
→ Daher **greift diese Ausnahme bei euch nicht**.
---
### 🧠 Fazit
| Szenario | GPLv3-Pflichten? |
| --------------------------------------------- | ------------------------------------------------- |
| P wird nur gehostet (SaaS/PaaS), nie verteilt | ❌ Nein |
| P wird als installierbares Produkt verteilt | ✅ Ja, alle abgeleiteten Werke müssen GPL sein |
| P nutzt AGPL-Komponenten | ⚠️ Ja, auch bei SaaS muss Code offengelegt werden |
---
### 💡 Empfehlung
Wenn ihr **zukünftig eventuell doch distributieren wollt**, wäre es gut, das in der Architektur zu berücksichtigen (z.B. K trennbar halten).
Solange ihr aber rein **als Betreiber eines SaaS/PaaS-Portals** handelt, seid ihr aus GPLv3-Sicht **vollkommen auf der sicheren Seite** auch mit euren proprietären K-Komponenten.
## AGPL vs GPL
> Das bedeutet ja, dass, im Sinne von OS, die AGPL strenger (schützneder) als die GPl ist?
Ganz genau **die AGPL ist eine „strenger schützende“ Lizenz als die GPL**, vor allem im Kontext von Cloud-/SaaS-Anbietern. Hier ist eine kurze, aber präzise Einordnung:
---
### 🔍 Unterschied: GPL vs. AGPL
| Lizenz | Muss Quellcode offengelegt werden bei… | Ziel |
| ---------- | ---------------------------------------------------------------------------------- | ----------------------- |
| **GPLv3** | Nur wenn Software **verbreitet** wird (z.B. Download, Installationspaket) | Schutz bei Distribution |
| **AGPLv3** | Auch wenn Software **nur übers Netzwerk bereitgestellt** wird (z.B. als **SaaS**) | Schutz auch bei Hosting |
---
### 🧠 Was schützt die AGPL mehr?
Die AGPL wurde entwickelt, um die sogenannte **„SaaS-Lücke“** in der GPL zu schließen:
* Viele Unternehmen nahmen GPL-Software, erweiterten sie, **veröffentlichten sie nie**, sondern **hosteten sie einfach als Service**.
* Ergebnis: **Sie profitierten von freier Software, gaben aber nichts zurück** ganz legal unter der GPL.
Die AGPL sagt daher:
> Wenn du Software nutzt, die unter der AGPL steht,
> **und Nutzer über ein Netzwerk mit ihr interagieren können**,
> **musst du deinen geänderten Quellcode offenlegen**
> auch **ohne Verbreitung der Software an sich**.
---
### 🎯 Beispiel aus der Praxis
* GitLab hat früher zwischen:
* **Open-Core GPLv3** (für das Backend),
* und **nicht-freien Premium-Modulen** unterschieden.
* Wäre GitLab stattdessen **AGPL-lizenziert** gewesen, hätten **Cloud-Anbieter wie AWS** den Code nicht einfach benutzen können, ohne ihre Änderungen zu teilen.
---
### 💡 Zusammenfassung
* **GPL schützt Freiheit bei Distribution**
* **AGPL schützt Freiheit auch bei Netzwerk-Zugriff**
* AGPL ist also:
* **restriktiver** für Unternehmen (die Software nur hosten)
* **schutzstärker** im Sinne der Open-Source-Idee
---
Wenn euch also wichtig ist, dass **niemand eure Software nehmen und als Cloud-Dienst hosten kann**, ohne den Code freizugeben, dann ist **AGPL die bessere Wahl**.
Aber für viele Firmen ist genau das der Grund, **AGPL zu vermeiden**, z.B. bei kommerziellem SaaS-Geschäft.
## MIT and LGPL
> Und LGPL und MIT? Wie sind diese Lizenzen in dieser Hinsicht zu bewerten?
Super Frage! Wenn wir GPL, **AGPL**, **LGPL** und **MIT** in Bezug auf **„Schutzwirkung“ im Sinne von Open Source“** vergleichen wollen, entsteht ein ziemlich klar gestaffeltes Bild:
---
### 🧭 Übersicht: GPL, AGPL, LGPL, MIT im Vergleich
| Lizenz | Verbreitungspflicht bei… | „Infiziert“ abgeleitete Werke? | Kommerziell verwendbar? | SaaS-Abdeckung? | Schutzwirkung (Stärke) |
| ---------- | -------------------------------------- | --------------------------------------- | ---------------------------------- | --------------- | ---------------------- |
| **MIT** | Nur bei direkter Weitergabe (optional) | ❌ Nein | ✅ Ja | ❌ Nein | 🔓 *Sehr schwach* |
| **LGPLv3** | Nur bei Verbreitung als *Library* | 🔸 Nur bei statischer/direkter Kopplung | ✅ Ja (dynamische Kopplung erlaubt) | ❌ Nein | ⚖️ *Moderat* |
| **GPLv3** | ✅ Ja, bei Verbreitung | ✅ Ja | ⚠️ Eingeschränkt | ❌ Nein | 🔐 *Stark* |
| **AGPLv3** | ✅ Ja, auch bei SaaS | ✅ Ja | ⚠️ Eingeschränkt | ✅ Ja | 🛡️ *Sehr stark* |
---
### 🔍 Detail: Was bedeuten diese Lizenzen im Alltag?
#### 1. **MIT License**
* Extrem permissiv: „Mach, was du willst aber lass den Lizenztext dran.“
* **Keine Copyleft-Wirkung**
* Unternehmen können MIT-Code integrieren, verändern, verkaufen **ohne irgendetwas offenzulegen**
🔧 **Typisch für:**
* Frontend-Libs (React, Lodash)
* Dev-Tools, CLI-Tools
* Universitätscode
---
#### 2. **LGPL (Lesser GPL)**
* Speziell für **Bibliotheken** entwickelt
* Erlaubt **dynamische Verlinkung** mit proprietärer Software
* Nur wenn du die Library **veränderst** oder **statisch einbindest**, musst du den Quellcode offenlegen
⚠️ **Aber Achtung:** Wenn du z.B. ein JavaScript-Modul *"inplace"* in dein Bundle einbaust, kann das **als „statische Kopplung“** gelten → Copyleft greift!
🔧 **Typisch für:**
* Standard-Bibliotheken wie glibc (GNU C Library)
* Frameworks, die auch kommerziell nutzbar sein sollen
---
#### 3. **GPLv3**
* Klassisches **„starkes Copyleft“**
* Wer verändert + verteilt, muss **den vollständigen Quellcode** offenlegen
* **ABER:** Keine Wirkung bei reinem SaaS-Hosting
🔧 **Typisch für:**
* Linux-Kernel (GPLv2)
* Forgejo, Gitea
* Viele CLI-Tools oder Compiler
---
#### 4. **AGPLv3**
* Wie GPLv3, **plus Netzwerk-Nutzung**
* „Wenn Benutzer über das Internet mit der Software interagieren können, dann musst du auch bei SaaS den Quellcode offenlegen“
🔧 **Typisch für:**
* Self-hosted SaaS-Alternativen (z.B. Matomo, CryptPad, GitLab CE früher)
* Community-Projekte, die „Cloud-Reselling“ verhindern wollen
---
### 🧠 Zusammenfassung als Diagramm
```text
⬆ Schutzwirkung / Copyleft
│ 🛡️ AGPLv3 → verteilt + SaaS → Quellcodepflicht
│ 🔐 GPLv3 → verteilt → Quellcodepflicht
│ ⚖️ LGPLv3 → Libs bei Verlinkung → schwächer
│ 🔓 MIT → keine Pflicht, frei nutzbar
└────────────▶ Freiheit für kommerzielle Nutzung
```
---
### 🔎 Fazit für dich:
Wenn du in deinem Unternehmen:
* **keinen Quellcode offenlegen willst**, ist MIT oder (je nach Fall) LGPL sinnvoll
* **SaaS-only anbietest**, und Open-Source-Code nutzt:
* **MIT, BSD, LGPL → unkritisch**
* **GPL → unkritisch (solange nicht verteilt)**
* **AGPL → kritisch, da *Quellcodeoffenlegungspflicht* trotz SaaS**
## Apache
> kannst du in die obige Analyse noch die Apache Lizenz mit aufnehmen? Also alle Schritte 1-4, plus die Zusammenfassung bitte, denn das sieht sehr übersichtlich aus, was du da geschrieben hast!
Sehr gern! Hier ist die **erweiterte und überarbeitete Analyse**, diesmal mit der **Apache License 2.0** ergänzt inklusive aller Schritte, Unterschiede und der praktischen Zusammenfassung.
---
### 🧭 Übersicht: MIT, LGPL, GPLv3, AGPLv3, Apache 2.0 im Vergleich
| Lizenz | Verbreitungspflicht bei… | „Infiziert“ abgeleitete Werke? | Kommerziell verwendbar? | SaaS-Abdeckung? | Schutzwirkung |
| -------------- | -------------------------------------------- | ----------------------------------- | ----------------------- | --------------- | -------------------------------- |
| **MIT** | ✅ Nur bei direkter Weitergabe | ❌ Nein | ✅ Ja | ❌ Nein | 🔓 *Sehr schwach* |
| **Apache 2.0** | ✅ Nur bei Weitergabe (wie MIT) | ❌ Nein *(aber inkl. Patentklausel)* | ✅ Ja | ❌ Nein | 🔓 *Schwach* + 🧩 *Patentschutz* |
| **LGPLv3** | ✅ Bei Veränderung oder statischer Verlinkung | 🔸 Teilweise (nur bei Kopplung) | ✅ Ja (mit Vorsicht) | ❌ Nein | ⚖️ *Moderat* |
| **GPLv3** | ✅ Bei jeder Weitergabe | ✅ Ja (alles, was „abgeleitet“ ist) | ⚠️ Eingeschränkt | ❌ Nein | 🔐 *Stark* |
| **AGPLv3** | ✅ Auch bei SaaS/Remote-Zugriff | ✅ Ja | ⚠️ Eingeschränkt | ✅ Ja | 🛡️ *Sehr stark* |
---
### 🔍 Schritt 1: **MIT License**
* Extrem permissiv
* Du kannst fast alles damit machen (auch Closed Source)
* Nur Pflicht: Lizenztext beibehalten
* **Keine** Copyleft-Wirkung
* ⚠️ Keine Patentlizenz (aber selten ein Problem bei typischem JavaScript-Code etc.)
✅ Ideal für: Bibliotheken, die möglichst breit einsetzbar sein sollen
🧩 Kein Schutz gegen „Trittbrettfahrer“, aber maximal kompatibel
---
### 🔍 Schritt 2: **Apache License 2.0**
* Sehr ähnlich zu MIT, aber mit einem großen Zusatz:
* 📄 **Erteilt explizit eine Patentlizenz**
* 🛡️ **Schützt Nutzer davor, von ursprünglichen Autoren verklagt zu werden**
* Keine Copyleft-Wirkung
* Sehr **kompatibel mit kommerziellen Projekten**
* Weit verbreitet in Unternehmen (Google, Apache Software Foundation, etc.)
✅ Ideal für: Große Projekte, bei denen Patentsicherheit wichtig ist (Cloud, Machine Learning, etc.)
---
### 🔍 Schritt 3: **LGPLv3**
* „Mittelweg“ zwischen MIT/Apache und GPL
* Typisch für **dynamisch gelinkte Libraries**
* **Du darfst sie in Closed Source nutzen**, solange du:
* dynamisch linkst,
* **und** keine Änderungen an der LGPL-Bibliothek selbst machst
* Bei **statischer Einbindung**: GPL-Pflichten!
✅ Ideal für: Libraries, die *frei genutzt* werden sollen, ohne dass jedes Projekt unter GPL stehen muss
---
### 🔍 Schritt 4: **GPLv3 / AGPLv3**
#### GPLv3:
* Striktes Copyleft
* Alles, was du daraus baust oder veränderst, **muss auch GPL werden**, sobald du es weitergibst
* **Keine Wirkung bei SaaS**
#### AGPLv3:
* Wie GPLv3, aber **greift auch bei SaaS**
* Nutzer über das Netzwerk = „Verbreitung“ → Quellcodepflicht
✅ Ideal für: Projekte, bei denen sichergestellt werden soll, dass **jede Nutzung** (auch SaaS) zur Veröffentlichung zwingt
---
### 🎯 Zusammenfassung als Diagramm
```text
⬆ Schutzwirkung / Copyleft
│ 🛡️ AGPLv3 → verteilt + SaaS → Quellcodepflicht
│ 🔐 GPLv3 → verteilt → Quellcodepflicht
│ ⚖️ LGPLv3 → bei Kopplung/Änderung → Quellcodepflicht für die Library
│ 🔓 Apache 2 → freie Nutzung, + Patentlizenz
│ 🔓 MIT → freie Nutzung, einfachste Lizenz
└────────────▶ Freiheit für kommerzielle Nutzung
```
---
### 🧠 Praxis-Tipps
| Ziel | Empfohlene Lizenz |
| -------------------------------------------------------------------------------- | --------------------------- |
| Du willst maximale Freiheit für Nutzer und Firmen | **MIT** oder **Apache 2.0** |
| Du baust eine Library, willst Verlinkung erlauben, aber Änderungen offenlegen | **LGPLv3** |
| Du willst, dass alle Ableitungen Open Source bleiben (außer bei SaaS) | **GPLv3** |
| Du willst, dass niemand deinen Code nutzt, ohne ihn offenzulegen auch bei SaaS | **AGPLv3** |
---
## 'Free software forever'
> Der Claim 'will be free forever' lässt sich also nur mit GPL und AGPL verwirklcihen? verstehe ich das richtig? Oder anders: Warum ist Forgejo in v9 zur GPL gewechselt?
Ja, du verstehst das im Kern **richtig** – und wir können den Gedanken noch etwas differenzierter ausleuchten.
---
### 🎯 1. „Will be free forever“ – Was bedeutet das konkret?
Das ist **kein juristischer Begriff**, sondern ein ideologischer Anspruch:
> ❝ Jeder, der meine Software nutzt, verändert oder weiterverbreitet, muss sie **frei halten**, so wie ich es getan habe. Niemand soll sie einsperren dürfen. ❞
Damit ist gemeint:
* Der Quellcode bleibt **immer offen** (auch bei Ableitungen)
* Niemand darf durch Ableitungen ein **proprietäres Produkt** daraus machen
* Freiheit für alle Nutzer, **nicht nur für den ersten Entwickler**
Das nennt man **„starkes Copyleft“**, und das bieten:
* **GPL (v2, v3)** schützt vor proprietärer Weitergabe
* **AGPL** schützt vor proprietärer Weitergabe **und** vor „SaaS-Einschluss“ (Cloud-Tricks)
---
### ✅ 2. Welche Lizenzen sichern diese Freiheit tatsächlich?
| Lizenz | „Wird für immer frei bleiben“ (auch bei Ableitungen)? |
| -------------------- | -------------------------------------------------------- |
| **AGPLv3**           | ✅ Ja – selbst bei SaaS                                   |
| **GPLv3**            | ✅ Ja – bei jeder Weitergabe                              |
| **LGPLv3**           | 🔸 Teilweise – nur bei Änderungen an der LGPL-Komponente |
| **MIT / Apache 2.0** | ❌ Nein – Ableitungen dürfen Closed Source sein           |
Also: **Nur AGPL/GPL können mit gutem Gewissen sagen: *„Will be free forever.“***
---
### 🧩 3. Warum ist **Forgejo** in Version 9 auf **GPLv3+** gewechselt?
Forgejo ist ein **Fork von Gitea**, und dieser Fork entstand aus **Sorge um Community-Eigentum** und langfristige Offenheit.
Mit dem Wechsel zu GPLv3 wollte das Forgejo-Projekt klarstellen:
> 💬 „Unsere Plattform bleibt auf ewig frei. Niemand wird Forgejo nehmen und ein Closed-Source-Projekt daraus machen können, ohne ihre Änderungen offenzulegen.“
#### Hintergrund:
* Forgejo sieht sich als „**Community-controlled GitHub-Alternative**“
* Die GPL verhindert, dass jemand (z.B. ein Cloud-Anbieter oder eine Firma) Forgejo nutzt, verändert und **nicht wieder zurückgibt**
* Bei MIT oder Apache könnte z.B. jemand Forgejo forken, um daraus ein kommerzielles, geschlossenes DevOps-Portal zu bauen – völlig legal.
Mit der **GPL** ist das **nicht erlaubt**, sobald der Code weitergegeben wird (z.B. als Binary oder Quellcode).
Mit der **AGPL** wäre sogar **SaaS-Nutzung geschützt** – aber Forgejo hat sich (noch?) nicht für AGPL entschieden.
---
### 🏁 Fazit
* Ja: **Nur GPL und AGPL** gewährleisten, dass der Code „für immer frei“ bleibt
* Forgejo wählte GPL, um:
* Copyleft-Schutz zu aktivieren
* Die Community gegen proprietäre Forks zu schützen
* Den Open-Source-Gedanken auf Dauer zu sichern
Wenn du „Free as in freedom“ **rechtlich absichern willst**, führt an **(A)GPL** kaum ein Weg vorbei.
MIT, Apache und LGPL ermöglichen zwar freie Nutzung, aber **keinen dauerhaften Schutz der Freiheit**.
---
## Forgejo and SaaS/PaaS
> Rückfrage zu "Bei MIT oder Apache könnte z.B. jemand Forgejo forken, um daraus ein kommerzielles, geschlossenes DevOps-Portal zu bauen völlig legal.": Das ist doch mit der GPL möglich, weil sie SaaS/PaaS nicht einschliesst ?!
Sehr gute Rückfrage – das ist **genau der Knackpunkt**, den viele übersehen.
---
### 🔍 Ja, du hast recht:
**GPL (auch v3)** schützt **nicht** vor der sogenannten:
> **SaaS-Schlupflücke** (auch „ASP loophole“ genannt)
Das bedeutet konkret:
> Ein Anbieter kann ein GPL-lizenziertes Programm **modifizieren**, es **auf Servern betreiben** (z.B. als DevOps-Plattform), es **niemals weitergeben** und somit **nichts zurückgeben müssen**.
---
### 🔁 Was genau *verbietet* die GPL (v3)?
Sie verpflichtet zur **Offenlegung des Quellcodes**, wenn:
* du das Programm **weiterverbreitest** (als Binary oder Source)
* du es **mit anderer Software kombinierst** (dann unter bestimmten Bedingungen auch)
Aber:
> Wenn du das Programm nur **ausführst** (z.B. auf einem eigenen Server) und **nur als Dienst** anbietest – **greift die GPL nicht**.
#### Also:
Mit der GPL darf jemand Forgejo **modifizieren** und z.B. als `SuperForgeDevOps.io` gehostet anbieten, **ohne** den geänderten Code zu veröffentlichen.
Solange er **nichts weitergibt**, **muss er nichts zurückgeben**.
---
### 🛡️ AGPL schließt diese Lücke
Die **AGPL (Affero GPL)** ist eine Erweiterung der GPLv3, und sie **fügt explizit hinzu**:
> **Wenn du das Programm über ein Netzwerk bereitstellst (z.B. SaaS), musst du auch dann den Sourcecode veröffentlichen.**
Damit ist auch das **Servermodell abgedeckt** – kein Entkommen durch Hosting.
---
### 📌 Fazit zur Rückfrage
| Lizenz | Schutz vor Closed-Source-Forks | Schutz vor SaaS-Schlupflücke |
| ---------------- | ------------------------------ | ---------------------------- |
| **MIT / Apache** | ❌ nein | ❌ nein |
| **GPLv3** | ✅ ja (bei Weitergabe) | ❌ nein (SaaS erlaubt) |
| **AGPLv3** | ✅ ja | ✅ ja |
➡️ **Forgejo mit GPLv3** schützt vor klassischen **Closed-Source-Distributionen**,
aber **nicht** vor der **„wir hosten es einfach selbst“-Strategie**.
Wenn Forgejo also „vollständige Freiheitserhaltung“ auch im SaaS-Kontext will,
müsste es auf **AGPL** umsteigen.
## Forgejo and SaaS/PaaS - II
> Also war deine Analyse oben in Punkt 3 "Warum ist Forgejo in Version 9 auf GPLv3+ gewechselt?" noch nicht ganz korrekt ... d.h. Forgejo lässt diese Möglichkeit offen (ein Portal auf eines PaaS zu bauen, so wie ich da plane)
Ganz genau – **sehr präzise erkannt**.
---
### 🛠️ Korrektur & Präzisierung von Punkt 3:
Meine Aussage in Punkt 3 war **inhaltlich nicht falsch**, aber **nicht vollständig**.
Ich hatte gesagt:
> „Mit der GPL ist das nicht erlaubt, sobald der Code weitergegeben wird (z.B. als Binary oder Quellcode).“
Das stimmt **aber nur bei Weitergabe**.
Ich hätte explizit hinzufügen sollen:
> 🔸 **Die GPL erlaubt es jedoch, modifizierte Versionen privat als Service zu betreiben – ohne Quellcodefreigabe.**
---
### 🤝 Richtigstellung in Bezug auf Forgejo:
* Mit der Wahl von **GPLv3+** (nicht AGPL) hat Forgejo:
* ✅ *Copyleft* gegen klassische Forks durch Distribution geschützt
* ❌ *Keinen Schutz* gegen **SaaS-only Forks** geschaffen
Das heißt für dich:
> Du darfst Forgejo modifizieren (inkl. Frontend, z.B. zu einem Portal), und **solange du es nur als PaaS anbietest**, **bist du GPL-konform** – **auch wenn du den Quellcode deiner Erweiterungen nicht veröffentlichst**.
---
### Warum dann überhaupt GPL und nicht MIT?
Forgejo wollte vermutlich:
* den **klassischen Missbrauch** (Closed-Source-Forks, OEMs, Redistribution als Produkt) **verhindern**
* aber trotzdem eine gewisse **SaaS-Offenheit** erlauben, z.B.:
* für Hoster
* für Unternehmen, die Forgejo intern/extern als Plattform verwenden
* für Partner wie dich, die es als Basis in Produkten einsetzen
Eine **AGPL** hätte potenziell:
* zu mehr **rechtlicher Unsicherheit** geführt
* Hosting-Anbieter **abgeschreckt**
* weniger Verbreitung ermöglicht
Das ist oft ein **strategischer Trade-off**:
**Mehr Kontrolle** (AGPL) vs. **mehr Adaption & Akzeptanz** (GPL).
---
### 🔍 Fazit zu deiner Einschätzung:
> **„Forgejo lässt die Möglichkeit offen, ein Portal auf PaaS-Basis zu bauen.“**
✅ **Richtig.**
Solange du nichts **weitergibst** (Binary, Container, Installationspaket), bist du **nicht verpflichtet**, den Quellcode deiner Erweiterungen zu veröffentlichen – trotz GPLv3.
---

View file

@ -1,6 +0,0 @@
title: Concepts
arrange:
- overview.md
- general
- customer-developer
- edp-developer

View file

Before

Width:  |  Height:  |  Size: 944 KiB

After

Width:  |  Height:  |  Size: 944 KiB

View file

Before

Width:  |  Height:  |  Size: 113 KiB

After

Width:  |  Height:  |  Size: 113 KiB

View file

@ -1,8 +1,7 @@
---
title: Overview
title: Concepts
weight: 1
description: The underlying platforming concepts of the Edge Developer Framework (EDF) solution, i.e. the problem domain
---
The underlying platforming concepts of the Edge Developer Framework (EDF) solution, i.e. the problem domain

View file

@ -1,10 +0,0 @@
title: edgeDeveloper Framework
arrange:
- storyline.md
- introduction.md
- edge-developer-framework.md
- platforming.md
- orchestrators.md
- cnoe.md
- cnoe-showtime.md
- conclusio.md

View file

@ -1,3 +0,0 @@
{
"name": "project-management"
}

View file

@ -1,7 +0,0 @@
---
title: General Concepts
weight: 1
description: The underlying platforming concepts of the Edge Developer Framework (EDF) solution, i.e. the problem domain
---
The underlying platforming concepts of the Edge Developer Framework (EDF) solution, i.e. the problem domain

View file

@ -1,163 +0,0 @@
## CNOE Backstage commit history
This page describes the features and changes added to CNOE Backstage, along with a brief summary of what was done in each commit.
Commits and changes that are important for implementation of the EDP Backstage will be marked with <span style="color:red"> red </span> color and commits
that may be relevant, but not in priority, will be marked with <span style="color:green"> green </span> color. Commits are listed from newest to oldest.
### <span style="color:green">№19. Commit name: Imported roadiehq http request plugin </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/135c0cb26f3e004a27a11edb6a4779035aff9805)<br>
This commit adds the `@roadiehq/scaffolder-backend-module-http-request` plugin. This plugin provides Scaffolder actions
that allow sending web requests within Backstage templates.
[Description and usage examples](https://roadie.io/backstage/plugins/scaffolder-http-requests/)
[RoadieHQ's Backstage plugins repository](https://github.com/RoadieHQ/roadie-backstage-plugins)
#### Code changes:
The dependency was added to `package.json`:
```
"@roadiehq/scaffolder-backend-module-http-request": "^4.3.5"
```
This means the project now includes this module and can import it in the code.
The module was registered in `packages/backend/src/index.ts`:
```
backend.add(
import('@roadiehq/scaffolder-backend-module-http-request/new-backend'),
);
```
This integrates the module into the Backstage backend.
<span style="color:green">This commit introduces functionality that may be useful for extending our Backstage templates with web request capabilities </span>
### №18. Commit name: add mkdoc to docker image
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/9232d633b2698fffa6d0a73b715e06640d170162)<br>
In this commit, several changes were made to the Dockerfile:
1. Mkdoc was added to the Dockerfile as part of the build process, enabling seamless integration for generating and managing project documentation during the containerization process. [Information about mkdocs](https://www.mkdocs.org/)
2. Refactoring of the Dockerfile. Optimization of the build structure
### №17. Commit name: move argo workflows to ci/cd tab. remove unused components
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/e8b84831e99f13033beb11530149fbb24d846f29)<br>
In this commit no new functionality was added. Refactoring commit
### №16. Commit name: Updated argoworkflows to query workflows based on component annotation
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/5f3a54f571ea6f210ebb3418611e1ac4e6e3e7c5)<br>
This commit is mostly refactoring
1. Refined the structure of the `argoWorkflowsPlugin` by updating `createPlugin` and `createRoutableExtension` definitions. <br>Files changed: `plugins/argo-workflows/src/plugin.ts` and `plugins/argo-workflows/src/api/index.ts`
2. Reformatted and standardized code style
### <span style="color:green">№15. Commit name: Extended plugin to support more k8s objects </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/2cde0a09caf55fe907463b0b8b4a6321482b322e)<br>
This commit contains improvements to the plugin for working with k8s. All changes are in file `packages/backend/src/plugins/k8s-apply.ts`
#### Changes:
1. <b>Manifest Handling Optimization</b>. Simplified writing manifest to file based on input type (string or object).
2. <b>Support for Multiple Manifests</b>. Writes each manifest from a list to a separate file with a unique name.
3. <b>Kubernetes Config Generation</b>. Adds logic for creating Kubernetes config with server, certificate, and token details.
4. <b>kubectl Command Execution</b>. Executes kubectl apply or kubectl create for each manifest in the list.
5. <b>Error Handling</b>. Throws an error if a valid cluster name is not specified.
### №14. Commit name: Fix for k8s-apply plugin config issue
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/6329ea80b1b0bb2e22f78333ae6462f147e1f4a1)<br>
This commit contains fixes to the plugin for working with k8s. All changes are in file `packages/backend/src/plugins/k8s-apply.ts`
### <span style="color:red"> №13. Commit name: bump backstage version to 1.28.4 </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/db5d40f2848aaa8ca22a15fbdb7a06cfd986a162)<br>
In this commit, the Backstage version for the build was updated. Along with this, the dependency versions in the
package.json files for Backstage, the Backstage backend, and for the custom CNOE plugins (appache-spark, argo-workflows, cnoe-ui, terraform-backend, and terraform) were also updated.
Additionally, in some places, the plugin imports were replaced with the current ones, such as in the file `packages/app/src/App.tsx`.
<span style="color:red">This commit is important, because it shows an example of updating the Backstage version and bringing the dependencies up to date </span>
### №12. Commit name: fix typos
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/6f1e1764859ad29c042b9ed015cc71cd8dcc6543)<br>
This commit contains only fixes of the typos.
### №11. Commit name: add actions for PRs
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/2361b299f2291f55062fde61afd06dbf9542fbef)<br>
Added `.github/workflows/pr.yaml` defining actions to run on pull request events. The workflow includes steps for checking out the repository, setting up Node.js, installing dependencies, and running TypeScript checks.
### №10. Commit name: fix tsc errors
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/d328b6986b4721b31c606e1da3ba93afce1864b7)<br>
Bugfixes and improvements of the logging and tests for the custom `terraform-backend` plugin of the CNOE.
### №9. Commit name: Add terraform plugin into backstage-app
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/10b78fca7a474b24b5e8124697a01bdf76b152ca)<br>
In this commit, Terraform was integrated into CNOE Backstage. To achieve this, two custom plugins, `terraform` and `terraform-backend`, were created in the `/plugins` directory.
#### The following steps were taken to integrate these plugins into Backstage:
1. The Terraform plugin was added as an internal dependency (`"@internal/plugin-terraform"`) in the project's dependencies file (`packages/app/package.json`).
2. In `packages/app/src/App.tsx`, the Terraform Page component was imported from the plugin, and a route for this page was created with the path `/terraform`.
3. In the entity page component (`packages/app/src/components/catalog/EntityPage.tsx`), the Terraform plugin component was imported and integrated.
4. The Terraform backend plugin (`"@internal/backstage-plugin-terraform-backend"`) was added as an internal dependency in the Backstage backend dependencies file (`packages/backend/package.json`).
5. In the Backstage backend creation file (`packages/backend/src/index.ts`), the plugin was imported and added to the backend object.
### <span style="color:red"> №8. Commit name: upgrade to backstage 1.26.5 </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/c2ff2abd11c6f719c51b50d04878b108ba70d40e)<br>
The name of this commit is misleading because, in addition to updating the Backstage version and actualizing dependency files and imports, several other changes were made.
#### Changes besides version update:
1. Fully rewritten Backstage backend creation (`packages/backend/src/index.ts`). After the changes, the backend is now created using the createBackend function from the `@backstage/backend-defaults` plugin.
2. <span style="color:red"> Keycloak OIDC </span> <br>The authentication mechanism has been rewritten using Keycloak OIDC (<span style="color:red">file location: packages/backend/src/plugins/auth.ts</span>). The object responsible for authentication is named `authModuleKeycloakOIDCProvider`.
3. <span style="color:red"> Custom CNOE Scaffolder Actions (for Gitea and ArgoCD) </span> <br> Mechanism for creating and adding custom CNOE Scaffolder Actions, which are used as actions for Backstage templates, has been rewritten (<span style="color:red">file location: packages/backend/src/plugins/scaffolder.ts</span>)
<span style="color:red">This commit is important, because here we can find the locations of the CNOE custom actions for templates and the implementation of the Keycloak auth for Backstage </span>
### №7. Commit name: update readme
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/b8e4f08914af17a48ed6b8b83a3621a9f4b4181d)<br>
In this commit only README was updated
### <span style="color:green"> №6. Commit name: use cnoe theme </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/12eef8100d2521a6f665ef603ebe8196b12c8e96)<br>
In this commit design of user interface was updated and integrated into the Backstage app
#### Changes:
1. Interface styles were updated in the cnoe-ui plugin in files: `plugins/cnoe-ui/src/components/themes/light-theme.ts` and `plugins/cnoe-ui/src/components/themes/dark-theme.ts`
2. UI style is connected to Backstage in file `packages/app/src/App.tsx`
3. Routes to the CNOE logo for the interface are created in file `packages/app/src/components/Root/Root.tsx`
<span style="color:green"> This commit could be interesting for the team, because here we can see how to connect a custom UI to Backstage and how to remove the CNOE logo </span>
### <span style="color:red"> №5. Commit name: Add workflow to automate backstage builds </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/9ee3514e51c1a354b7fe85a90117faf8328bfa0b)<br>
In this commit were added github workflow for building backstage image and Dockerfile (files: `.github/workflows/build-and-push.yaml` and `Dockerfile`)
### <span style="color:red"> №4. Commit name: Include plugin scaffolder actions directly in src </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/139a3674c035bfe9f486f50aa8cb3deee8b25fd5)<br>
#### Changes:
1. Test Backstage templates were created
2. `k8s-apply` plugin was added to the backend of Backstage (file: `packages/backend/src/plugins/k8s-apply.ts`)
3. `sanitize` plugin was added to the backend of Backstage (file: `packages/backend/src/plugins/sanitize.ts`)
4. `scaffolder` plugin was added to the backend of Backstage (file: `packages/backend/src/plugins/scaffolder.ts`)
5. `verify` plugin was added to the backend of Backstage (file: `packages/backend/src/plugins/verify.ts`)
<span style="color:red">This commit is important, because we will need scaffolder plugin in EDP backstage for using actions in backstage templates</span>
### <span style="color:red"> №3. Commit name: working integrations </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/4b61eaef5920da8b0110af8e6f2997354b8af63a)<br>
#### Changes:
1. Created custom CNOE plugin for integration with Apache Spark (`/plugins/appache-spark`)
2. Created custom CNOE plugin for integration with Argo Workflows (`/plugins/argo-workflows`)
3. Created custom CNOE plugin for the CNOE UI theme (`/plugins/cnoe-ui`)
4. In Backstage backend in file with custom gitea actions (<span style="color:red"> packages/backend/src/plugins/gitea-actions.ts </span>) were commented out: <span style="color:green">checkGiteaContentUrl, checkDurationLimit and checkAvailabilityGiteaRepository </span>
5. Integration with ArgoCD was added into backend of the Backstage as a backend plugin (<span style="color:red"> packages/backend/src/plugins/argocd.ts</span>)
6. In `packages/app/src/App.tsx` the `argo-workflows` plugin and the `apache-spark` plugin were integrated into Backstage and routes for their pages were created
7. Components from the `argo-workflows` plugin were integrated into the Backstage component for the catalog items (`packages/app/src/components/catalog/EntityPage.tsx`). <br> <span style="color:red"> Additionally, the component EntityArgoCDOverviewCard from @roadiehq/backstage-plugin-argo-cd was added, which is a visual component for showing the ArgoCD status of catalog items that are registered in ArgoCD.</span> [Link to plugin's repo](https://github.com/RoadieHQ/roadie-backstage-plugins/tree/main/plugins/frontend/backstage-plugin-argo-cd).
8. New plugins were added to the dependencies
<span style="color:red">This commit is important, because here we can find an implementation of the ArgoCD plugin for the Backstage backend and how it is integrated into Backstage. It also shows the integration of the component for showing the ArgoCD status from the @roadiehq/backstage-plugin-argo-cd plugin.</span>
### <span style="color:red"> №2. Commit name: working gitea scaffolding </span>
[Link to a commit](https://github.com/cnoe-io/backstage-app/commit/fe842bed997f317979da7fd42093bc62b3e491b7)<br>
#### Changes:
1. Implemented integration with gitea (<span style="color:red">file: packages/backend/src/plugins/gitea-actions.ts</span>)
2. Implemented integration with the kubernetes (<span style="color:red">file: packages/backend/src/plugins/kubernetes.ts</span>)
<span style="color:red">This commit is important, because here was implemented gitea and kubernetes integration that should be ported to EDP Backstage</span>
### №1. Commit name: Initial commit

View file

@ -1,103 +0,0 @@
---
title: Backstage Update Tutorial
---
The first Backstage update was performed as described in the ticket https://jira.telekom-mms.com/browse/IPCEICIS-2299 from version 1.28 to version 1.36.1.
This document provides a detailed guide on the process to update Backstage locally on your machine from version 1.36.1 to 1.38.1.
Later updates can be performed accordingly. (The fixing of deprecated imports should not be necessary, again.)
## Table of Contents
1. [Prerequisites](#prerequisites)
2. [Setting Up Backstage](#setting-up-backstage)
3. [Updating Backstage](#updating-backstage)
4. [Fix deprecated import](#fix-deprecated-import)
5. [Running Backstage](#running-backstage)
## Prerequisites <a name="prerequisites"></a>
Before you start, make sure you have the following installed on your machine:
1. **Node.js**: Backstage requires Node.js. You can download it from the [Node.js website](https://nodejs.org/).
2. **Yarn**: Backstage uses Yarn as its package manager.
3. **Git**
4. **Docker**
## Setting Up Backstage <a name="setting-up-backstage"></a>
To download the latest version of our Backstage-EDP app, use git to clone it from the OSC repository at https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW-CICD/backstage-edp.git.
Make sure yarn is installed locally. If it hasn't been installed yet, run in the project path:
```bash
yarn install
```
## Updating Backstage <a name="updating-backstage"></a>
In the project path run:
```bash
yarn backstage-cli versions:bump
```
This will update all relevant components to the newest versions.
## Fix deprecated import <a name="fix-deprecated-import"></a>
In a text editor of your choice open the file <I>'/backstage-edp/packages/backend/src/plugins/proxy.ts'</I> and implement the two following changes.
### Update import
First (due to changes in the library) replace the deprecated import
```typescript
import { createRouter } from '@backstage/plugin-proxy-backend';
```
with the following import:
```typescript
import { createRouter } from '@roadiehq/backstage-plugin-argo-cd-backend';
```
### Update method call createRouter()
Secondly (due to a changed signature) replace the <B>old</B> call of method createRouter()
```typescript
return await createRouter({
logger: env.logger,
config: env.config,
discovery: env.discovery
});
```
with the following <B>new</B> method call:
```typescript
return await createRouter({
logger: env.logger,
config: env.config
});
```
## Running backstage <a name="running-backstage"></a>
After completing all updates and fixes, run backstage to verify everything was updated correctly.
In the project path run:
```bash
docker build --no-cache -t backstage-cnoe .
```
Start the backend from a console:
```bash
yarn run start-backend
```
Start the frontend from a console:
```bash
yarn run start
```
The backstage frontend should now open in your browser.

View file

@ -1,77 +0,0 @@
# Forgejo
## Meta
* https://codeberg.org/forgejo
* https://codeberg.org/forgejo#meta
## Forgejo Development
* https://forgejo.org/docs/v1.21/developer/architecture/
* https://forgejo.org/docs/v1.21/developer/customization/
## UI
* https://codeberg.org/forgejo/discussions/issues/338
## Tasks
Abstimmung mit Micha (Marschner), 06.05.25:
1. Einarbeiten, Development Model, zB. Astro Framework
1. Soft-Forking, Down/Up-Streaming
1. 'Sparring' User experience
Koordination:
* Hinweis: '8 Wochen MVP Forgejo-as-a-Service' ist derzeit der Fokus auf das SaaS in der OTC
* In dieser Zeit arbeitet Julius an der UX-Architekturkonzeption
* Michael arbeitet sich ein, Technologie-Stack verstehen
* 'Zum Laufen bekommen': Lokales Aufsetzen als Nächstes gemeinsam -> Termin stellt Stephan ein
## Local Development
### method 1: docker compose with forgejo:latest
Method 1: run a latest forgejo with sqlite - configuration is done on first login
* https://forgejo.org/docs/latest/admin/installation-docker/
```bash
docker compose up -d
# open localhost:3000
# check registry
docker login localhost:3000
docker image push localhost:3000/<user>/<image>
```
### method 2: install and develop it from source
* https://codeberg.org/forgejo/forgejo.git
```bash
git clone https://codeberg.org/forgejo/forgejo.git
# enter devcontainer in VSC (takes 10 mins for the first time)
# make build
# then run ./gitea (sic!) --help
# also see ./gitea forgejo-cli actions
```
### method 3: 'setup forgejo' (with runner)
* https://code.forgejo.org/actions/setup-forgejo
Additional links:
* https://brainsteam.co.uk/2024/03/17/moving-from-gitea-to-forgejo-including-actions-with-docker-compose/
* https://code.forgejo.org/forgejo/runner
* https://linus.dev/posts/setting-up-a-self-hosted-forgejo-actions-runner-with-docker-compose/
```bash
git clone https://code.forgejo.org/actions/setup-forgejo.git
```

View file

@ -1,290 +0,0 @@
# Forgejo
In the following you will find
* a lot of interesting Forgejo links
* how to do local setup and local development
* and 4 methods to set up Forgejo locally
* 3 times based only on Docker
* and once only based on Go et al.
## Local Setup and Development
### Forgejo Releases
* https://forgejo.org/releases/
-> the current release is 11.0
### method 1: docker ( - compose) with forgejo
Method 1 is just run the or a docker image.
To be more comfortable in the parameters we can use a docker-compose file.
run a latest forgejo with sqlite - configuration is done on first login
* https://forgejo.org/docs/latest/admin/installation-docker/
#### prepare and investigate the image
```bash
docker pull codeberg.org/forgejo/forgejo:11
docker inspect codeberg.org/forgejo/forgejo:11
# check cmd and entrypoint
# "Env": [
# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
# "USER=git",
# "GITEA_CUSTOM=/data/gitea"
# ],
# "Cmd": [
# "/usr/bin/s6-svscan",
# "/etc/s6"
# ],
# "WorkingDir": "/",
# "Entrypoint": [
# "/usr/bin/entrypoint"
# ],
# run the image with a cli
docker run -it --entrypoint="" codeberg.org/forgejo/forgejo:11 bash
# in the container
cat /usr/bin/entrypoint
# if [ $# -gt 0 ]; then
# exec "$@"
# else
# exec /usr/bin/s6-svscan /etc/s6
# fi
# check /etc/s6
tree /etc/s6/
# /etc/s6/
# ├── gitea
# │ ├── finish
# │ ├── run
# │ └── setup
# └── openssh
# ├── finish
# ├── run
# └── setup
cat /etc/s6/gitea/run
# #!/bin/bash
# [[ -f ./setup ]] && source ./setup
# pushd /app/gitea >/dev/null
# exec su-exec $USER /usr/local/bin/gitea web
# popd
cat /etc/s6/gitea/setup
# #!/bin/bash
# if [ ! -d /data/git/.ssh ]; then
# mkdir -p /data/git/.ssh
# fi
# # Set the correct permissions on the .ssh directory and authorized_keys file,
# # or sshd will refuse to use them and lead to clone/push/pull failures.
# # It could happen when users have copied their data to a new volume and changed the file permission by accident,
# # and it would be very hard to troubleshoot unless users know how to check the logs of sshd which is started by s6.
# chmod 700 /data/git/.ssh
# if [ -f /data/git/.ssh/authorized_keys ]; then
# chmod 600 /data/git/.ssh/authorized_keys
# fi
# if [ ! -f /data/git/.ssh/environment ]; then
# echo "GITEA_CUSTOM=$GITEA_CUSTOM" >| /data/git/.ssh/environment
# chmod 600 /data/git/.ssh/environment
# elif ! grep -q "^GITEA_CUSTOM=$GITEA_CUSTOM$" /data/git/.ssh/environment; then
# sed -i /^GITEA_CUSTOM=/d /data/git/.ssh/environment
# echo "GITEA_CUSTOM=$GITEA_CUSTOM" >> /data/git/.ssh/environment
# fi
# if [ ! -f ${GITEA_CUSTOM}/conf/app.ini ]; then
# mkdir -p ${GITEA_CUSTOM}/conf
# # Set INSTALL_LOCK to true only if SECRET_KEY is not empty and
# # INSTALL_LOCK is empty
# if [ -n "$SECRET_KEY" ] && [ -z "$INSTALL_LOCK" ]; then
# INSTALL_LOCK=true
# fi
# # Substitute the environment variables in the template
# APP_NAME=${APP_NAME:-"Forgejo: Beyond coding. We forge."} \
# RUN_MODE=${RUN_MODE:-"prod"} \
# DOMAIN=${DOMAIN:-"localhost"} \
# SSH_DOMAIN=${SSH_DOMAIN:-"localhost"} \
# HTTP_PORT=${HTTP_PORT:-"3000"} \
# ROOT_URL=${ROOT_URL:-""} \
# DISABLE_SSH=${DISABLE_SSH:-"false"} \
# SSH_PORT=${SSH_PORT:-"22"} \
# SSH_LISTEN_PORT=${SSH_LISTEN_PORT:-"${SSH_PORT}"} \
# LFS_START_SERVER=${LFS_START_SERVER:-"false"} \
# DB_TYPE=${DB_TYPE:-"sqlite3"} \
# DB_HOST=${DB_HOST:-"localhost:3306"} \
# DB_NAME=${DB_NAME:-"gitea"} \
# DB_USER=${DB_USER:-"root"} \
# DB_PASSWD=${DB_PASSWD:-""} \
# INSTALL_LOCK=${INSTALL_LOCK:-"false"} \
# DISABLE_REGISTRATION=${DISABLE_REGISTRATION:-"false"} \
# REQUIRE_SIGNIN_VIEW=${REQUIRE_SIGNIN_VIEW:-"false"} \
# SECRET_KEY=${SECRET_KEY:-""} \
# envsubst < /etc/templates/app.ini > ${GITEA_CUSTOM}/conf/app.ini
# chown ${USER}:git ${GITEA_CUSTOM}/conf/app.ini
# fi
# # Replace app.ini settings with env variables in the form GITEA__SECTION_NAME__KEY_NAME
# environment-to-ini --config ${GITEA_CUSTOM}/conf/app.ini
# # only chown if current owner is not already the gitea ${USER}. No recursive check to save time
# if ! [[ $(ls -ld /data/gitea | awk '{print $3}') = ${USER} ]]; then chown -R ${USER}:git /data/gitea; fi
# if ! [[ $(ls -ld /app/gitea | awk '{print $3}') = ${USER} ]]; then chown -R ${USER}:git /app/gitea; fi
# if ! [[ $(ls -ld /data/git | awk '{print $3}') = ${USER} ]]; then chown -R ${USER}:git /data/git; fi
# chmod 0755 /data/gitea /app/gitea /data/git
```
#### Outcome
The container will run
* /usr/local/bin/gitea web
* with USER=git
* and DB_TYPE=${DB_TYPE:-"sqlite3"}
#### prepare the docker compose file
From the [link above](https://forgejo.org/docs/latest/admin/installation-docker/) copy and create a docker-compose.yml like this:
```bash
echo $"
networks:
forgejo:
external: false
services:
server:
image: codeberg.org/forgejo/forgejo:11
container_name: forgejo
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- forgejo
volumes:
- ./forgejo:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- '3000:3000'
- '222:22'
" >> docker-compose.yml
```
### Run the container and open the WebUI
```bash
docker compose up -d
# open http://localhost:3000
### test: Check the registry
# check registry
docker login localhost:3000
docker image push localhost:3000/<user>/<image>
```
### method 2: install and develop it from source
The next real 'do development' setup method is by running the source.
* https://codeberg.org/forgejo/forgejo.git
```bash
git clone https://codeberg.org/forgejo/forgejo.git
# enter devcontainer in VSC (takes 10 mins for the first time)
# make build
# then run ./gitea (sic!) --help
# also see ./gitea forgejo-cli actions
```
### method 3: setup (a bit) declaratively - Forgejo and Forgejo Runner
#### The kubernetes operator way
The highly sophisticated way to do a declarative setup is already provided by the [Forgejo-Operator](https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW/forgejo-operator), which runs your desired Forgejo and Forgejo Runner state as custom resources managed by an operator.
#### The docker-compose way
You can also run both [Forgejo and Forgejo Runner the pure docker way.](https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/stephan.lo/stl-edp/src/branch/development/09-forgejo)
```bash
git clone https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/stephan.lo/stl-edp.git
git checkout development
cd 09-forgejo
# spin up
# ... first check and understand the content of the script !
./spin-up.sh
# destroy
./teardown.sh
```
Also have a look into the links provided in the README, especially https://codeberg.org/forgejo-contrib/delightful-forgejo
### method 4: 'setup forgejo' (with runner)
Really fun and promising-looking high-end community development stuff is this:
* https://code.forgejo.org/actions/setup-forgejo
```bash
git clone https://code.forgejo.org/actions/setup-forgejo.git
```
## References
### Meta
* https://codeberg.org/forgejo
### Forgejo Development
* https://forgejo.org/docs/v1.21/developer/architecture/
* https://forgejo.org/docs/v1.21/developer/customization/
### UI
* https://codeberg.org/forgejo/discussions/issues/338
### Some nice Setup references
* https://brainsteam.co.uk/2024/03/17/moving-from-gitea-to-forgejo-including-actions-with-docker-compose/
* https://code.forgejo.org/forgejo/runner
* https://linus.dev/posts/setting-up-a-self-hosted-forgejo-actions-runner-with-docker-compose/
### Further [curated] references
* [ArchWiki](https://wiki.archlinux.org/title/Forgejo)
* [Forgejo Command Line and Cli](https://forgejo.org/docs/latest/admin/command-line/)
* [Interface Customization](https://forgejo.org/docs/next/contributor/customization/)
* [From soft to hard fork from Gitea](https://forgejo.org/docs/next/contributor/customization/): In Feb 2024 it was decided to stop downward compatibility with Gitea
* [Drastic UI consolidation in v8.0](https://forgejo.org/2024-07-release-v8-0/)

View file

@ -1,149 +0,0 @@
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (X11; Linux x86_64; rv:135.0) Gecko/20100101 Firefox/135.0" version="26.1.0">
<diagram name="Page-1" id="R-WNyvjY5vV_Vzrg3hek">
<mxGraphModel dx="2534" dy="835" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1100" pageHeight="850" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="PfNbgjtPqerxexKKpkkC-71" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="-20" y="10" width="1420" height="830" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-2" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;Docker w&lt;/font&gt;&lt;font style=&quot;font-size: 20px;&quot;&gt;ithout service network&lt;/font&gt;" style="text;strokeColor=none;fillColor=none;html=1;fontSize=24;fontStyle=1;verticalAlign=middle;align=left;" parent="1" vertex="1">
<mxGeometry y="40" width="330" height="30" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-3" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./edpbuilder.sh --type kind --stacks all \&lt;br&gt;&amp;nbsp; --domain factory-192-168-198-2.traefik.me \&lt;br&gt;&amp;nbsp; --domain-gitea gitea-factory-192-168-198-2.traefik.me&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry y="170" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-7" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;EDP&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry y="120" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-8" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.2&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="180" y="120" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-12" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./docker-proxy-start.sh&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="350" y="170" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-13" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;Docker Cache Proxy&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="350" y="120" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-14" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.3&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="530" y="120" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-15" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./edpbuilder.sh --type kind --stacks core \&lt;br&gt;&amp;nbsp; --domain factory-192-168-198-4.traefik.me \&lt;br&gt;&amp;nbsp; --domain-gitea gitea-factory-192-168-198-4.traefik.me&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="700" y="170" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-16" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;Foundry&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="700" y="120" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-17" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.4&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="880" y="120" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-18" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./edpbuilder.sh --type kind --stacks all \&lt;br&gt;&amp;nbsp; --domain factory-192-168-198-5.traefik.me \&lt;br&gt;&amp;nbsp; --domain-gitea gitea-factory-192-168-198-5.traefik.me&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="1050" y="170" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-19" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;EDP&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="1050" y="120" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-20" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.5&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="1230" y="120" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-41" value="&lt;font style=&quot;font-size: 20px;&quot;&gt;Docker w&lt;/font&gt;&lt;font style=&quot;font-size: 20px;&quot;&gt;ith service network&lt;/font&gt;" style="text;strokeColor=none;fillColor=none;html=1;fontSize=24;fontStyle=1;verticalAlign=middle;align=left;" parent="1" vertex="1">
<mxGeometry y="320" width="330" height="30" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-42" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry y="450" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-43" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;EDP&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry y="400" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-44" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.2&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="180" y="400" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-45" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./docker-proxy-start.sh&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="350" y="450" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-46" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;Docker Cache Proxy&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="350" y="400" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-47" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.3&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="530" y="400" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-48" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="700" y="450" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-49" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;Foundry&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="700" y="400" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-50" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.4&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="880" y="400" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-51" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="1050" y="450" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-52" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;EDP&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="1050" y="400" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-53" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.198.5&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="1230" y="400" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-54" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./edpbuilder.sh --type kind --stacks all \&lt;br&gt;&amp;nbsp; --domain factory-192-168-197-3.traefik.me \&lt;br&gt;&amp;nbsp; --domain-gitea gitea-factory-192-168-197-3.traefik.me&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="350" y="710" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-67" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" parent="1" source="PfNbgjtPqerxexKKpkkC-55" target="PfNbgjtPqerxexKKpkkC-42" edge="1">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="530" y="590" />
<mxPoint x="165" y="590" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-55" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;EDP&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="350" y="660" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-56" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.197.3&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="530" y="660" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-57" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./edpbuilder.sh --type kind --stacks&amp;nbsp;&lt;/font&gt;&lt;font style=&quot;font-size: 16px;&quot;&gt;core&lt;/font&gt;&lt;font style=&quot;font-size: 16px;&quot;&gt; \&lt;br&gt;&amp;nbsp; --domain factory-192-168-197-2.traefik.me \&lt;br&gt;&amp;nbsp; --domain-gitea gitea-factory-192-168-197-2.traefik.me&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry y="710" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-58" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;Foundry&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry y="660" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-59" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.197.2&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="180" y="660" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-60" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;./edpbuilder.sh --type kind --stacks all \&lt;br&gt;&amp;nbsp; --domain factory-192-168-197-4.traefik.me \&lt;br&gt;&amp;nbsp; --domain-gitea gitea-factory-192-168-197-4.traefik.me&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="700" y="710" width="330" height="100" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-61" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;EDP&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="700" y="660" width="180" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-66" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0;exitDx=0;exitDy=0;entryX=0.558;entryY=0.98;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" source="PfNbgjtPqerxexKKpkkC-62" target="PfNbgjtPqerxexKKpkkC-51" edge="1">
<mxGeometry relative="1" as="geometry">
<mxPoint x="1230" y="560" as="targetPoint" />
<Array as="points">
<mxPoint x="880" y="640" />
<mxPoint x="1234" y="640" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-62" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;192.168.197.4&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="880" y="660" width="150" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-68" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0;exitDx=0;exitDy=0;entryX=0.527;entryY=1.01;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" source="PfNbgjtPqerxexKKpkkC-59" target="PfNbgjtPqerxexKKpkkC-48" edge="1">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="180" y="620" />
<mxPoint x="874" y="620" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-69" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;KIND Net &lt;font&gt;192.168.198.0/24&lt;/font&gt;&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="860" y="10" width="270" height="50" as="geometry" />
</mxCell>
<mxCell id="PfNbgjtPqerxexKKpkkC-70" value="&lt;font style=&quot;font-size: 16px;&quot;&gt;Service Net &lt;font&gt;192.168.197.0/25&lt;/font&gt;&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="1130" y="10" width="270" height="50" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

View file

@ -1,198 +0,0 @@
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (X11; Linux x86_64; rv:135.0) Gecko/20100101 Firefox/135.0" version="26.1.0">
<diagram name="Seite-1" id="1aZQnKV8tCKqu9HPROxU">
<mxGraphModel dx="1434" dy="835" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1169" pageHeight="827" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="NgepU7C5GcFRt7vnx28o-1" value="&lt;div&gt;&lt;font style=&quot;font-size: 20px;&quot;&gt;edpbuilder KIND Self-service Network&lt;/font&gt;&lt;/div&gt;" style="text;strokeColor=none;fillColor=none;html=1;fontSize=24;fontStyle=1;verticalAlign=middle;align=center;" parent="1" vertex="1">
<mxGeometry x="352" width="465" height="40" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-7" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="10" y="50" width="1150" height="650" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;Device&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-8">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="535" y="50" width="100" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-9" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="50" y="145" width="150" height="145" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-10" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="50" y="390" width="150" height="142.5" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-11" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="230" y="110" width="920" height="570" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;Virtual Machine&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-12">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="610" y="110" width="160" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-13" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="250" y="145" width="150" height="145" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-14" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="250" y="310" width="150" height="140" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-15" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="250" y="512.5" width="150" height="147.5" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-22" value="" style="endArrow=none;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-50" target="NgepU7C5GcFRt7vnx28o-52" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-23" value="Docker Service Network" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="480" y="292.5" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-24" value="&lt;div&gt;Docker Host&lt;/div&gt;&lt;div&gt;Network&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
<mxGeometry x="480" y="452.5" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-25" value="" style="endArrow=none;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-54" target="NgepU7C5GcFRt7vnx28o-23" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-26" value="" style="endArrow=none;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-52" target="NgepU7C5GcFRt7vnx28o-23" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-27" value="" style="endArrow=none;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-56" target="NgepU7C5GcFRt7vnx28o-24" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-28" value="" style="endArrow=none;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-45" target="NgepU7C5GcFRt7vnx28o-24" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-29" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="640" y="160" width="500" height="500" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;Docker&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-30">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="835" y="160" width="110" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-48" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="670" y="215" width="210" height="395" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-31" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="660" y="227.5" width="210" height="395" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;KIND Cluster&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-32">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="700" y="227.5" width="135" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-33" value="Service Net" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="700" y="292.5" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-34" value="&lt;div&gt;Docker KIND&lt;/div&gt;&lt;div&gt;Network&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="955" y="512.5" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-35" value="" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="910" y="267.5" width="210" height="180" as="geometry" />
</mxCell>
<UserObject label="&lt;div&gt;&lt;font style=&quot;font-size: 16px;&quot;&gt;Docker Proxy Cache&lt;/font&gt;&lt;/div&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-36">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="920" y="267.5" width="190" height="42.5" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-37" value="KIND Net" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="955" y="347.5" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-38" value="Host Port" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
<mxGeometry x="705" y="402.5" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-39" value="KIND Net" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="705" y="512.5" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-40" value="" style="endArrow=none;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-39" target="NgepU7C5GcFRt7vnx28o-34" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-41" value="" style="endArrow=none;html=1;rounded=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-34" target="NgepU7C5GcFRt7vnx28o-37" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-42" value="" style="endArrow=none;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-24" target="NgepU7C5GcFRt7vnx28o-38" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-43" value="" style="endArrow=none;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="NgepU7C5GcFRt7vnx28o-23" target="NgepU7C5GcFRt7vnx28o-33" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="740" y="350" as="sourcePoint" />
<mxPoint x="790" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;kubectl&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-44">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="275" y="512.5" width="100" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-45" value="VM Host IP" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
<mxGeometry x="265" y="572.5" width="120" height="60" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;Browser&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-49">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="75" y="145" width="100" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-50" value="VM IP" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
<mxGeometry x="65" y="207.5" width="120" height="60" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;Tiny Proxy&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-51">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="270" y="145" width="100" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-52" value="Service Net" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="265" y="207.5" width="120" height="60" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;Browser&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-53">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="275" y="310" width="100" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-54" value="Service Net" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="265" y="370" width="120" height="60" as="geometry" />
</mxCell>
<UserObject label="&lt;font style=&quot;font-size: 16px;&quot;&gt;kubectl&lt;/font&gt;" placeholders="1" name="Variable" id="NgepU7C5GcFRt7vnx28o-55">
<mxCell style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;overflow=hidden;fontSize=20;" parent="1" vertex="1">
<mxGeometry x="75" y="392.5" width="100" height="40" as="geometry" />
</mxCell>
</UserObject>
<mxCell id="NgepU7C5GcFRt7vnx28o-56" value="VM IP" style="shape=process;whiteSpace=wrap;html=1;backgroundOutline=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
<mxGeometry x="65" y="455" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-59" value="&lt;div align=&quot;right&quot;&gt;VM IP: e.g. 192.168.196.97&lt;/div&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#ffe6cc;strokeColor=#d79b00;" parent="1" vertex="1">
<mxGeometry x="65" y="720" width="510" height="40" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-60" value="&lt;div align=&quot;right&quot;&gt;VM Host IP: 127.0.0.1, host.docker.internal&lt;/div&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
<mxGeometry x="65" y="780" width="510" height="40" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-61" value="Service Net: static 192.168.197.0/25" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="595" y="720" width="510" height="40" as="geometry" />
</mxCell>
<mxCell id="NgepU7C5GcFRt7vnx28o-62" value="&lt;div align=&quot;right&quot;&gt;KIND Net: DHCP 192.168.198.0/24&lt;/div&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="595" y="780" width="510" height="40" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

View file

@ -1,170 +0,0 @@
# Host to Kind routing
When we subnetwork inside a VM (e.g. WSL), you won't get a connection from the host (e.g. Windows) to the kind network inside the VM.
### tldr;
Add a route in windows to your docker network (e.g. 192.168.199.0/24) over the vm network connector:
```powershell
# in windows admin mode
# 192.168.199.0/24: the network you want to route to, here: the dockernetwork inside vm
# 172.29.216.239 : the router address which routes the above network, here: the gateway inside the vm to windows
PS C:\Users\stl> route add 192.168.199.0/24 172.29.216.239
```
#### Outcome
Now in windows you can reach Docker network addresses inside your VM:
```powershell
PS C:\Users\stl> ping 192.168.199.33
Ping wird ausgeführt für 192.168.199.33 mit 32 Bytes Daten:
Antwort von 192.168.199.33: Bytes=32 Zeit<1ms TTL=64
```
## Intro
So let's say you created an edp setup by
```bash
# in WSL
$ ./edpbuilder.sh --type kind --stacks all --domain client-192-168-199-35.traefik.me --domain-gitea gitea-client-192-168-199-35.traefik.me
```
you will not be able to send tcp/ip packets from the host (windows) to the kind network gateway, which is inside the docker network of your vm:
```powershell
# in windows
PS C:\Users\stl> ping gitea-client-192-168-199-35.traefik.me
Ping wird ausgeführt für gitea-client-192-168-199-35.traefik.me [192.168.199.35] mit 32 Bytes Daten:
Zeitüberschreitung der Anforderung.
```
## Goal: Windows can access EDP
So what we want is a situation like the following:
In the following screenshot we have on the left a browser in Windows, and on the right a terminal in WSL. In both, a request to `client-192-168-199-35.traefik.me` is working:
![alt text](windows.png)
## Setup Route from windows to WSL
What we need is a route from windows to the docker containers inside the WSL.
So first check your docker network address:
```bash
# in wsl
$ ip r
default via 172.29.208.1 dev eth0 proto kernel
172.29.208.0/20 dev eth0 proto kernel scope link src 172.29.216.239
192.168.199.0/28 dev docker0 proto kernel scope link src 192.168.199.1
192.168.199.32/27 dev br-8e96da84337e proto kernel scope link src 192.168.199.33
```
What you see is
* the network connection to the host with the gateway `172.29.216.239`
* the docker network `192.168.199.0/28` ranging from 192.168.199.1 to 192.168.199.14 (/28 = 255.255.255.240)
* and the kind network `192.168.199.32/27` ranging from 192.168.199.33 to 192.168.199.62 (/27 = 255.255.255.224).
In Windows we see that the docker network is reachable via gateway `172.29.208.1` which is inside network `172.29.208.0/20`:
```powershell
PS C:\Users\stl> ipconfig
...
Ethernet-Adapter vEthernet (WSL):
Verbindungsspezifisches DNS-Suffix:
IPv4-Adresse . . . . . . . . . . : 172.29.208.1
Subnetzmaske . . . . . . . . . . : 255.255.240.0
Standardgateway . . . . . . . . . :
...
```
## add route
Now we add the route:
```powershell
# in windows
PS C:\Users\stl> route add 192.168.199.0/24 172.29.216.239
OK!
```
and can check it with
```powershell
# in windows
PS C:\Users\stl> route print
...
===========================================================================
Aktive Routen:
Netzwerkziel Netzwerkmaske Gateway Schnittstelle Metrik
0.0.0.0 0.0.0.0 10.34.216.1 10.34.219.176 25
...
192.168.199.0 255.255.255.0 172.29.216.239 172.29.208.1 16
...
===========================================================================
```
and have network `192.168.199.0/24` to be routed by `172.29.216.239` over `172.29.208.1`.
## Test
Now you should be able to ping from windows to wsl:
```powershell
# in windows, send ping
PS C:\Users\stl> ping gitea-client-192-168-199-35.traefik.me
Ping wird ausgeführt für gitea-client-192-168-199-35.traefik.me [192.168.199.35] mit 32 Bytes Daten:
Antwort von 192.168.199.35: Bytes=32 Zeit<1ms TTL=63
Antwort von 192.168.199.35: Bytes=32 Zeit<1ms TTL=63
Antwort von 192.168.199.35: Bytes=32 Zeit<1ms TTL=63
Antwort von 192.168.199.35: Bytes=32 Zeit<1ms TTL=63
Ping-Statistik für 192.168.199.35:
Pakete: Gesendet = 4, Empfangen = 4, Verloren = 0
(0% Verlust),
Ca. Zeitangaben in Millisek.:
Minimum = 0ms, Maximum = 0ms, Mittelwert = 0ms
```
```bash
# in wsl, receive ping
tcpdump -n -i eth0 icmp and src host 172.29.208.1
```
![alt text](pings.png)
## Troubleshooting
If icmp or http doesn't work, check that the firewall is off:
```bash
# in wsl
sudo ufw disable
```
Also be sure that ip forwarding is on in wsl:
```bash
# in wsl
echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
```

View file

@ -1,5 +0,0 @@
# Local networking
![](./edpbuilder-networking.drawio.png)
![](./edpbuilder-example-network.drawio.png)

View file

@ -1,25 +0,0 @@
# Localdev Registry
This [repository](https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/DevFW/localdev-registry) provides documentation and code for the usage of docker image pulling caching mechanisms.
## Overview: Usage scenarios
These are two mechanisms: registry mirroring and registry proxying.
While the first redirects image pull requests to a real registry clone serving OCI requests and thus behaves as a representative, the second just caches HTTP responses and acts as an object cache speeding up traffic.
As we need these two mechanisms in two settings 'local kind' and 'local docker', there result four scenarios which need to be implemented:
1. kind-mirror
1. kind-proxy-cache
1. docker-mirror
1. docker-proxy-cache
There is information about how to use and deploy these scenarios manually on the local box, and there is code for automating the deployment either by shell commands or in edpbuilder provisioning code.
## How to use this documentation
As of sprint 1, you can
1. read the [conceptual overview](./1-registry-mirror-and-cache-proxy-theory.md)
1. [set up one or a combination of the usage scenarios manually on your box](./2-registry-mirror-and-cache-proxy-manual-installation.md)
Additionally there is a documentation about ['hacking' a local mirror](./3-registry-mirror-and-cache-proxy-hacks.md), if you like to test the mirroring scenarios independently of external mirror registries.

View file

@ -1,200 +0,0 @@
# Introduction
This documentation describes how docker/OCI image pulls on a local linux box can be configured to connect to mirrors or pull through cache proxies.
The audience is developers who want to have faster or more reliable pulls, and want to avoid rate limits from external registries.
## Overview
This documentation has three parts:
1. background, (docker) image basics - this file
1. Installation of mirrors and caches
1. some hacks, e.g. a local mirror
## It's all about Processes!
We talk about 'docker images' and the way we 'pull' them from 'registries'.
So let's first go one step back and think about the context we use these terms and why.
The reason is that we want to run containers as processes and that they somehow need to come into life, and images are very low level artifacts on top of running containers.
> This is our developer's intention! We want processes running our application code!
![alt text](./img/1-linux-processes.png)
### Spawn processes by different stacks
The funny thing is that however you spawn a process you see it at the end as a normal linux process:
#### Run by a shell
```bash
bash -c 'exec -a my-job-bash sleep infinity' &
```
#### Run by Docker
```bash
docker run --restart=always -d ubuntu bash -c 'exec -a my-job-docker sleep infinity'
```
#### Run by Kubernetes
```bash
kubectl run k8sjob --image=ubuntu -- bash -c 'exec -a my-job-k8s sleep infinity'
```
#### Outcome
Your process list should look sth. like this ! :-)
```bash
~ $ ps ax | grep myjob
22529 pts/1 S 0:00 my-job-bash infinity
23154 ? Ss+ 0:00 my-job-docker infinity
24163 ? Ss 0:00 my-job-k8s infinity
```
### Extra
Try to kill the jobs and look what happens!
## Container Images
In the case of the Docker and Kubernetes 'Orchestrating overhead' we need 'images' as a base for the runtime.
> Hint: You can analyze a local image with https://github.com/containers/skopeo
* https://earthly.dev/blog/docker-image-storage-on-host/
* https://www.freecodecamp.org/news/where-are-docker-images-stored-docker-container-paths-explained/
```bash
~ $ docker image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
ubuntu latest a04dc4851cbc 3 weeks ago 78.1MB
kindest/node latest 2d9b4b74084a 2 months ago 1.05GB
ghcr.io/catthehacker/ubuntu act-latest 0fbcdbe238bf 2 months ago 1.47GB
registry 2 282bd1664cf1 16 months ago 25.4MB
ubuntu 18.04 f9a80a55f492 21 months ago 63.2MB
act-actions-action1-dockeraction latest f9a80a55f492 21 months ago 63.2MB
rpardini/docker-registry-proxy 0.6.2 6bbe4e47a504 4 years ago 12.3MB
registry.k8s.io/pause latest 350b164e7ae1 10 years ago 240kB
~ $ docker exec -it cluster-with-registry-mirror-control-plane crictl image ls
IMAGE TAG IMAGE ID SIZE
docker.io/kindest/kindnetd v20241212-9f82dd49 d300845f67aeb 39MB
docker.io/kindest/local-path-helper v20241212-8ac705d0 baa0d31514ee5 3.08MB
docker.io/kindest/local-path-provisioner v20241212-8ac705d0 04b7d0b91e7e5 22.5MB
docker.io/library/ubuntu latest a04dc4851cbcb 29.8MB
registry.k8s.io/coredns/coredns v1.11.3 c69fa2e9cbf5f 18.6MB
registry.k8s.io/etcd 3.5.16-0 a9e7e6b294baf 57.7MB
registry.k8s.io/kube-apiserver-amd64 v1.32.0 73afaf82c9cc3 98MB
registry.k8s.io/kube-apiserver v1.32.0 73afaf82c9cc3 98MB
registry.k8s.io/kube-controller-manager-amd64 v1.32.0 f3548c6ff8a1e 90.8MB
registry.k8s.io/kube-controller-manager v1.32.0 f3548c6ff8a1e 90.8MB
registry.k8s.io/kube-proxy-amd64 v1.32.0 aa194712e698a 95.3MB
registry.k8s.io/kube-proxy v1.32.0 aa194712e698a 95.3MB
registry.k8s.io/kube-scheduler-amd64 v1.32.0 faaacead470c4 70.6MB
registry.k8s.io/kube-scheduler v1.32.0 faaacead470c4 70.6MB
registry.k8s.io/pause 3.10 873ed75102791 320kB
```
### Distributable Images are stored in Registries
* Images which have not been built in the local host image store come from registries.
* They are complex compounds where the parts are stored in a `image repository`.
* For now it's not important how they are composed - we are just interested to understand how we pull them.
* **The parts we are pulling is our subject to cache!**
![alt text](img/1-images.png)
### Image Name Syntax
The naming conventions are a bit fuzzy. The best definition is this one:
*`registry/namespace/repo:tag`*
![alt text](./img/1-image-naming-convention.png)
## Engine
Next let's find out which components do the pull, so that we know what needs to be configured to use our mirror and caching components.
Images are prepared and processed by so called 'container engines', so that we have a running container at the end. These engines comply both to the OCI specification (for the underlying runtime), and secondly to the CRI specification (for Kubernetes, to abstract away the container engine stuff).
### Different possibilities for the engine
<!--https://docs.google.com/presentation/d/1S-JqLQ4jatHwEBRUQRiA5WOuCwpTUnxl2d1qRUoTz5g p.37 -->
![alt text](./img/2-differnt-engines.png)
## OCI/CRI stack
As we want to run kind and docker we focus on engines which are both Docker and Kubernetes compliant.
<!-- https://www.kreyman.de/index.php/others/linux-kubernetes/232-unterschiede-zwischen-docker-containerd-cri-o-und-runc -->
![alt text](./img/docker-containerd-cri-o-und-runc.png)
This is possible by `containerd`, and then we have these two engine stacks with respect to Docker and Kubernetes:
> Originally containerd came from Docker, but it has a Plugin called 'cri-shim'
### Docker - container engine stack
* dockerd - container daemon
* containerd - high-level container runtime
* runc - low-level container runtime
### Kubernetes - container engine stack
* option 1 CRI-O-based:
* CRI-O - high-level container runtime
* runc - low-level container runtime
* option 2 containerd-based:
* containerd - high-level container runtime
* runc - low-level container runtime
The purposeful significance of containerd is also shown [in this even broader picture of tools and runtimes](https://sarusso.github.io/blog/container-engines-runtimes-orchestrators.html):
![alt text](./img/container-engines-runtimes-orchestrators.png)
### Kind uses containerd
The CRI part of the stack is implemented in our Kind and Docker case by containerd (and runc)
```bash
~ $ k get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
cluster-with-registry-mirror-control-plane Ready control-plane 7h4m v1.32.0 172.18.0.4 <none> Debian GNU/Linux 12 (bookworm) 6.8.0-53-generic containerd://1.7.24
```
### How this relates to images
As a side note: How the engine operates on images to pass it to the dring component `runc` is nicely shown here:
<!--https://docs.google.com/presentation/d/1S-JqLQ4jatHwEBRUQRiA5WOuCwpTUnxl2d1qRUoTz5g p.36 -->
![alt text](./img/1-container-engine.png)
## Outcome
In a local
* Linux host setup
with
* Kind-Kubernetes and
* Docker
as container engines we can focus on dockerd and containerd when we want to handle image pulling.
We must also focus on dockerd (not only containerd, although it's also included in the docker engine) as dockerd overwrites the containerd mirror config.
## references
* https://collabnix.com/monitoring-containerd
* https://github.com/containerd/containerd/blob/main/docs/cri/registry.md#configure-registry-credentials-example---gcr-with-service-account-key-authentication
* https://medium.com/@charled.breteche/caching-docker-images-for-local-kind-clusters-252fac5434aa
* https://maelvls.dev/docker-proxy-registry-kind/

View file

@ -1,256 +0,0 @@
# Installation
This documentation describes how docker/OCI image pulls on a local linux box can be configured to connect to mirrors or pull through cache proxies.
The audience is developers who want to have faster or more reliable pulls, and want to avoid rate limits from external registries.
## Introduction
There are four different scenarios, which can be combined arbitrarily:
| Registry type | kind | docker |
| --- | --- | --- |
| mirror | scenario 1 | scenario 3 |
| cache | scenario 2 | scenario 4 |
The scenarios can be combined arbitrarily, but when you use scenario 1 you shouldn't forget to also use the mirror in kind:
| combination | s2 (kind + cache) | s3 (docker + mirror) | s4 (docker + cache) |
| --- | --- | --- | --- |
| s1 (kind + mirror) | Mirror and Cache only for kind, you probably don't use docker too much | Both Docker and Kind are mirrored and don't have a cache | |
| s2 (kind + cache) | | doesn't make sense without s1 | both kind and docker are cached and don't need a mirror, you probably are in free internet |
| s3 (docker + mirror) | | | doesn't make sense without s1 |
## Preliminaries
We will need two container images stored in our container host repo before we do changes to the pull configuration.
So be sure that you have them already successfully pulled, or do it right now:
```bash
# precondition: your current docker config is able to pull images from docker.io
docker pull registry:2
docker pull rpardini/docker-registry-proxy:0.6.2
```
## Scenario 1: Registry Mirror on Kind for MMS company network
### What you need
You need to know the registries you want to mirror, and the address of the mirror, which here will be the MMS artifactory mirror. (remark: see in the last chapter for a test registry as mirror on your box.)
> Hint: Typically only 'docker.io' needs to be mirrored.
### Install
The installation is done by setting the `containerd` configuration with a registry-mirror entry during kind setup.
```bash
# in MMS:
MIRROR_NAME=common-docker.artifacts.mms-at-work.de
KIND_CLUSTER_NAME=cluster-with-registry-mirror
```
In the following kind config we only mirror `docker.io`. You can append [as many mirror entries as you want](https://github.com/containerd/containerd/blob/main/docs/cri/registry.md#configure-registry-credentials-example---gcr-with-service-account-key-authentication).
```bash
cat <<EOF | kind create cluster --name $KIND_CLUSTER_NAME --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["${MIRROR_NAME}"]
EOF
```
> Hint: To get logs in info or debug level see section 'hacks'
> Hint: You can also change the `containerd` config in a running node. Then just restart containerd with `systemctl restart containerd`
## Outcome
Now a typical company network blocker is removed - all containerd instances (in every cluster node) know how to bypass the Dockerhub rate limit by requesting `docker.io` images first from the mirror:
![alt text](./img/2-scenario-1-kind-mirror.png)
The drawback is that you still need the bandwidth to the mirror. So if you are in home office you still will pull each new image on each new kind cluster creation over the network.
## Scenario 2: Registry Cache Proxy on Kind
Now it gets more tricky or let's say 'even more local': We will install a [cache proxy](https://github.com/rpardini/docker-registry-proxy) as docker container process on our host.
### What you need
You need the [Caching proxy](https://github.com/rpardini/docker-registry-proxy). We will install it on your node.
### Install
```bash
CACHE_PROXY_NAME=docker_registry_proxy
DOCKER_KIND_NETWORK=kind
```
#### Caching Proxy
Run the caching proxy `https://github.com/rpardini/docker-registry-proxy`:
```bash
# we start the caching proxy. It will also cache the mirror, if used
# tip: this container is very long running and stores the cached data in a subfolder from your pwd.
# So it's recommended to start it in a higher level folder.
docker run -itd \
--restart always \
--name $CACHE_PROXY_NAME \
--network $DOCKER_KIND_NETWORK \
--hostname $CACHE_PROXY_NAME \
-p 0.0.0.0:${HOST_PORT:-3128}:3128 \
-e ENABLE_MANIFEST_CACHE=true \
-v $(pwd)/docker_mirror_cache:/docker_mirror_cache \
-v $(pwd)/docker_mirror_certs:/ca \
-e REGISTRIES="$MIRROR_NAME k8s.gcr.io gcr.io quay.io docker.elastic.co" \
rpardini/docker-registry-proxy:0.6.2
```
#### Kind cluster
Next start your kind cluster if not running yet. You also can use the cluster from above with the mirror set.
> When you run a test registry mirror (see section 'hacks'), then you need either to provide the mirror's certificate inside the proxy server or you set up the proxy server with tls_no_verify (`-e VERIFY_SSL=false`). In the former everything will work as expected, in the latter the mirror will complain about a missing TLS connectivity to the proxy.
#### Configure nodes of the Kind cluster
Now each node's `containerd` needs to be configured to use the Cache proxy on the host:
```bash
#!/bin/sh
# https://github.com/rpardini/docker-registry-proxy#kind-cluster
SETUP_URL=http://$CACHE_PROXY_NAME:3128/setup/systemd
pids=""
for NODE in $(kind get nodes --name "$KIND_CLUSTER_NAME"); do
docker exec "$NODE" sh -c "\
curl $SETUP_URL \
| sed s/docker\.service/containerd\.service/g \
| sed '/Environment/ s/$/ \"NO_PROXY=127.0.0.0\/8,10.0.0.0\/8,172.16.0.0\/12,192.168.0.0\/16\"/' \
| bash" & pids="$pids $!" # Configure every node in background
done
wait $pids # Wait for all configurations to end
```
## Outcome
As the proxy is configured all requests - either mirror or registry - go directly to the proxy.
The proxy checks whether first a mirror is to be connected if the image is not in the cache or is to be updated.
![alt text](./img/2-scenario-2-kind-cache.png)
## Scenario 3: Registry Mirror on Docker
> Be aware that Docker only enables a mirror for `docker.io`
### What you need
You need to know the address of the mirror. Here we reuse the ${MIRROR_NAME} from scenario 2.
Only `docker.io` will be mirrored, this is a restriction of docker.
### Run
```bash
# as root
cat << EOD > /etc/docker/daemon.json
{
"metrics-addr" : "127.0.0.1:9323",
"experimental" : true,
"features": { "buildkit": true },
"registry-mirrors": ["${MIRROR_NAME}"],
"insecure-registries" : []
}
EOD
```
```bash
# as root
systemctl restart docker
```
## Outcome
![alt text](./img/2-scenario-3-docker-mirror.png)
## Scenario 4: Registry Cache Proxy on Docker
Last but not least we connect our host's docker engine to the proxy cache.
### What you need
You need the Caching proxy. We will reuse it from scenario 2.
### Run
We run the same systemd-settings for containerd as we did for containerd in the kind nodes.
```bash
# as root
mkdir -p /etc/systemd/system/docker.service.d
cat << EOD > /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://localhost:3128/"
Environment="HTTPS_PROXY=http://localhost:3128/"
Environment="NO_PROXY=localhost,127.0.0.1,gitea.poc.edp.localtest.me"
EOD
curl http://localhost:3128/ca.crt > /usr/share/ca-certificates/docker_registry_proxy.crt
if fgrep -q "docker_registry_proxy.crt" /etc/ca-certificates.conf ; then
echo "certificate refreshed"
else
echo "docker_registry_proxy.crt" >> /etc/ca-certificates.conf
fi
update-ca-certificates --fresh
```
Now reload and restart:
```bash
# as root
systemctl daemon-reload
systemctl restart docker
```
## Outcome
As the proxy is configured all requests - either mirror or registry - go directly to the proxy.
The proxy checks whether first a mirror is to be connected if the image is not in the cache or is to be updated.
![alt text](./img/2-scenario-4-docker-cache.png)
In the `docker info` output you see proxy and mirror setting:
```bash
~ $ docker info
Client: Docker Engine - Community
Version: 27.0.2
Server:
...
HTTP Proxy: http://localhost:3128/
HTTPS Proxy: http://localhost:3128/
No Proxy: localhost,127.0.0.1,gitea.poc.edp.localtest.me
Experimental: true
Insecure Registries:
127.0.0.0/8
Registry Mirrors:
https://registry-1.docker.io.mirror.test/
```

View file

@ -1,179 +0,0 @@
# Hacks
This documentation describes how docker/OCI image pulls on a local linux box can be configured to connect to mirrors or pull through cache proxies.
The audience is developers who want to have faster or more reliable pulls, and want to avoid rate limits from external registries.
This part is called 'hacks' and describes some more hands-on components and investigations on the command line.
## Create an own registry mirror to test a kind mirror setting
Maybe you don't have or need a mirror, but you would like to run all scenarios of part 2 and thus need a local mirror.
Or you would like to investigate the handshaking between mirror and cache and thus need the logs of the mirror.
```bash
# the name of our mirror
MIRROR_NAME=registry.docker.io.mirror.test
# the mirror will be accessible by its host name in the kind network
DOCKER_KIND_NETWORK=kind
```
## The registry needs TLS
```bash
# create a temporary directory
mkdir registry-certs
```
```bash
# cert config
cat <<EOF>openssl-${MIRROR_NAME}.cnf
[req]
default_bits = 2048
default_keyfile = domain.key
distinguished_name = req_distinguished_name
x509_extensions = v3_ca
req_extensions = v3_ca
prompt = no
[req_distinguished_name]
countryName = DE
stateOrProvinceName = SomeState
localityName = SomeCity
organizationName = MyCompany
organizationalUnitName = IT
commonName = ${MIRROR_NAME}
[v3_ca]
subjectAltName = @alt_names
[alt_names]
DNS.1 = ${MIRROR_NAME}
EOF
```
```bash
# create self signed cert
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout registry-certs/${MIRROR_NAME}.key -out registry-certs/${MIRROR_NAME}.crt -config openssl-${MIRROR_NAME}.cnf
```
### Now run the registry
```bash
# run registry as mirror
docker run -d \
--name ${MIRROR_NAME} \
--network $DOCKER_KIND_NETWORK \
-p 443:443 \
-v $(pwd)/registry-certs:/certs \
-e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
-e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/${MIRROR_NAME}.crt \
-e REGISTRY_HTTP_TLS_KEY=/certs/${MIRROR_NAME}.key \
-e REGISTRY_PROXY_REMOTEURL="https://registry-1.docker.io" \
registry:2
```
### Next run the kind cluster
```bash
# create kind cluster
cat <<EOF | kind create cluster --name cluster-with-registry-mirror --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["${MIRROR_NAME}"]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."${MIRROR_NAME}".tls]
insecure_skip_verify = true
EOF
```
### Log the registry and do a deployment
```bash
# in another terminal
docker logs -f ${MIRROR_NAME}
```
```bash
# check images in the cluster before deployment
docker exec -it cluster-with-registry-mirror-control-plane crictl image ls
# do deployment
kubectl run busybox --image=busybox -- /bin/sh -c "sleep 3600"
# check images in the cluster again, you should see busybox
docker exec -it cluster-with-registry-mirror-control-plane crictl image ls
```
## journalctl
You also can check the containerd logs:
```bash
docker exec -it cluster-with-registry-mirror-control-plane journalctl -u containerd
```
See also:
* Logging variants: https://www.baeldung.com/ops/containerd-check-logs
* Monitoring containerd: https://collabnix.com/monitoring-containerd/
### debug journalctl
* https://gvisor.dev/docs/
* https://gvisor.dev/docs/user_guide/containerd/configuration/
```bash
cat <<EOF | kind create cluster --name cluster-with-registry-mirror --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[debug]
level="debug"
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["${MIRROR_NAME}"]
EOF
```
## Integrate registry proxy
If you have a running registry proxy which also proxies the mirror, e.g. started like
```bash
CACHE_PROXY_NAME=docker_registry_proxy
docker run -itd \
--restart always \
--name $CACHE_PROXY_NAME \
--network $DOCKER_KIND_NETWORK \
--hostname $CACHE_PROXY_NAME \
-p 0.0.0.0:${HOST_PORT:-3128}:3128 \
-e ENABLE_MANIFEST_CACHE=true \
-v $(pwd)/docker_mirror_cache:/docker_mirror_cache \
-v $(pwd)/docker_mirror_certs:/ca \
-e REGISTRIES="$MIRROR_NAME k8s.gcr.io gcr.io quay.io docker.elastic.co" \
rpardini/docker-registry-proxy:0.6.2
```
then you need to make the proxy aware of the mirror's certificate.
### set mirror ca in proxy
The proxy is SSL-verifying upstreams. So we need to place the CA of the mirror.
```bash
docker cp registry-certs/registry-1.docker.io.mirror.test.crt docker_registry_proxy:/
docker exec -it docker_registry_proxy bash -c 'cat /registry-1.docker.io.mirror.test.crt >> /etc/ssl/certs/ca-certificates.crt'
docker exec -it docker_registry_proxy bash -c 'kill -SIGHUP $(cat /run/nginx.pid)'
```

View file

@ -1,16 +0,0 @@
# Review / Product-Demo 27.05.25
* central-forgejo in OTC -> war nur test
* API OTC sold out: kann man auf ressourcenengpässe in der cloud beim provisioniren reagieren?
Fragen Alex endther:
* CI für cluster generieren (nightly?)
* kubernetes version ?
* wie läuft migration von kubernetes?
# Retro
Dailis overload
Salzstruer in den Mobs - not everything can be solved in mobs

View file

@ -1,7 +1,6 @@
---
title: Overview
title: Project
weight: 5
description: How we organize work and proceed as team, which decisions we made, what outputs and outcomes we have
---
How we organize work and proceed as team, which decisions we made, what outputs and outcomes we have

View file

@ -18,7 +18,7 @@ Let's start with a look into the history of platform engineering. A good startin
They create lots of [beautiful articles and insights](https://humanitec.com/blog), their own [platform products](https://humanitec.com/products/) and [basic concepts for the platform architecture](https://humanitec.com/platform-engineering) (we'll meet this later on!).
<img src="./3_platforming/humanitec-history.png" width="600" alt="https://platformengineering.org/blog/the-story-of-platform-engineering">
<img src="./humanitec-history.png" width="600" alt="https://platformengineering.org/blog/the-story-of-platform-engineering">
### Further nice reference to the raise of platforms
@ -41,7 +41,7 @@ When looking at these 'capabilities', we have CNCF itself:
There is a CNCF working group which provides the definition of [Capabilities of platforms](https://tag-app-delivery.cncf.io/whitepapers/platforms/#capabilities-of-platforms) and shows a first idea of the layered architecture of platforms as **service layer for developers** ("product and application teams"):
<img src="./3_platforming/platforms-def.drawio.png" width="600">
<img src="./platforms-def.drawio.png" width="600">
> Important: As Platform engineer also notice the [platform-eng-maturity-model](https://tag-app-delivery.cncf.io/whitepapers/platform-eng-maturity-model/)
@ -50,7 +50,7 @@ There is a CNCF working group which provides the definition of [Capabilities of
Or, in another illustration for the platform as a developer service interface, which also defines the **'Platform Engineering Team'** inbetween:
<img src="./3_platforming/platform-self-services.webp" width="600" alt="https://medium.com/@bijit211987/what-is-platform-engineering-and-how-it-reduce-cognitive-load-on-developers-ac7805603925">
<img src="./platform-self-services.webp" width="600" alt="https://medium.com/@bijit211987/what-is-platform-engineering-and-how-it-reduce-cognitive-load-on-developers-ac7805603925">
## How to set up Platforms
@ -77,7 +77,7 @@ Build or buy - this is also in platform engineering a much-debated discussion, which
{{% pageinfo color="info" %}}
### What comes next?
[Next](./orchestrators.md) we'll see how these concepts got structured!
[Next](../orchestrators/) we'll see how these concepts got structured!
{{% /pageinfo %}}
## Addendum

Some files were not shown because too many files have changed in this diff Show more