Marco Ebert 2024-09-18 00:12:49 +01:00 committed by GitHub
commit 0a495619c4
12 changed files with 405 additions and 409 deletions


@@ -208,13 +208,12 @@ jobs:
path: docker.tar.gz
retention-days: 5
helm-lint:
name: Helm chart lint
chart-lint:
name: Chart / Lint
runs-on: ubuntu-latest
needs:
- changes
if: |
(needs.changes.outputs.charts == 'true') || (needs.changes.outputs.baseimage == 'true') || ${{ github.event.workflow_dispatch.run_e2e == 'true' }}
if: fromJSON(needs.changes.outputs.charts) || fromJSON(needs.changes.outputs.baseimage) || fromJSON(github.event.workflow_dispatch.run_e2e)
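For context on this change: GitHub Actions job outputs and `workflow_dispatch` inputs are always strings, so the old expression compared against `'true'` (while also mixing bare and `${{ }}` expression syntax), whereas `fromJSON` parses the string `"true"`/`"false"` into a real boolean. A minimal sketch of the pattern — the job and output names mirror this workflow, the step bodies are illustrative:
```yaml
jobs:
  changes:
    runs-on: ubuntu-latest
    outputs:
      # Job outputs are always strings, e.g. "true" or "false".
      charts: ${{ steps.filter.outputs.charts }}
    steps:
      - id: filter
        run: echo "charts=true" >> "$GITHUB_OUTPUT"
  chart-lint:
    runs-on: ubuntu-latest
    needs:
      - changes
    # fromJSON turns the string output into a boolean for the job-level condition.
    if: fromJSON(needs.changes.outputs.charts)
    steps:
      - run: echo "Charts changed, running lint."
```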
steps:
- name: Checkout
@@ -222,55 +221,56 @@ jobs:
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
with:
python-version: "3.x"
- name: Set up Helm
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
with:
python-version: '3.x'
- name: Set up chart-testing
- name: Set up Helm Chart Testing
uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1
- name: Install Helm Unit Test Plugin
- name: Set up Helm Docs
uses: gabe565/setup-helm-docs-action@79a4cfe8dc7ae51c1113e3f3a1ae45e601d0c963 # v1.0.3
- name: Generate docs
run: |
helm-docs --chart-search-root charts
if ! git diff --exit-code charts/ingress-nginx/README.md
then
echo "Please use helm-docs in your fork of the project and commit an updated README.md for the chart."
echo "https://github.com/kubernetes/ingress-nginx/blob/main/MANUAL_RELEASE.md#d-edit-the-valuesyaml-and-run-helm-docs"
exit 1
fi
- name: Lint chart
run: |
ct lint --config ./.ct.yaml
curl --silent --show-error --fail --location https://github.com/artifacthub/hub/releases/download/v1.17.0/ah_1.17.0_linux_amd64.tar.gz --remote-name
echo "57c184b71a9a5c59192c2158fc08bdddca5c340fb1deeed0158383a665b38bf1 ah_1.17.0_linux_amd64.tar.gz" | shasum --check
tar xzf ah_1.17.0_linux_amd64.tar.gz ah
./ah lint --path charts/ingress-nginx
rm ah_1.17.0_linux_amd64.tar.gz ah
- name: Run unit tests
run: |
helm plugin install https://github.com/helm-unittest/helm-unittest
helm unittest charts/ingress-nginx
- name: Run Helm Unit Tests
run: |
helm unittest charts/ingress-nginx -d
- name: Run chart-testing (lint)
run: ct lint --config ./.ct.yaml
- name: Run helm-docs
run: |
GOBIN=$PWD GO111MODULE=on go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.0
./helm-docs --chart-search-root=${GITHUB_WORKSPACE}/charts
DIFF=$(git diff ${GITHUB_WORKSPACE}/charts/ingress-nginx/README.md)
if [ ! -z "$DIFF" ]; then
echo "Please use helm-docs in your clone, of your fork, of the project, and commit a updated README.md for the chart. https://github.com/kubernetes/ingress-nginx/blob/main/RELEASE.md#d-edit-the-valuesyaml-and-run-helm-docs"
fi
git diff --exit-code
rm -f ./helm-docs
- name: Run Artifact Hub lint
run: |
wget https://github.com/artifacthub/hub/releases/download/v1.5.0/ah_1.5.0_linux_amd64.tar.gz
echo 'ad0e44c6ea058ab6b85dbf582e88bad9fdbc64ded0d1dd4edbac65133e5c87da *ah_1.5.0_linux_amd64.tar.gz' | shasum -c
tar -xzvf ah_1.5.0_linux_amd64.tar.gz ah
./ah lint -p charts/ingress-nginx || exit 1
rm -f ./ah ./ah_1.5.0_linux_amd64.tar.gz
helm-test:
name: Helm chart testing
chart-test:
name: Chart / Test
runs-on: ubuntu-latest
needs:
- changes
- build
- helm-lint
if: |
(needs.changes.outputs.charts == 'true') || (needs.changes.outputs.baseimage == 'true') || ${{ github.event.workflow_dispatch.run_e2e == 'true' }}
- chart-lint
if: fromJSON(needs.changes.outputs.charts) || fromJSON(needs.changes.outputs.baseimage) || fromJSON(github.event.workflow_dispatch.run_e2e)
strategy:
matrix:
@@ -286,35 +286,27 @@ jobs:
go-version: ${{ needs.build.outputs.golangversion }}
check-latest: true
- name: cache
- name: Download cache
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: docker.tar.gz
- name: fix permissions
run: |
sudo mkdir -p $HOME/.kube
sudo chmod -R 777 $HOME/.kube
- name: Load cache
run: gzip --decompress --stdout docker.tar.gz | docker load
- name: Create Kubernetes ${{ matrix.k8s }} cluster
- name: Create cluster
id: kind
run: |
kind create cluster --image=kindest/node:${{ matrix.k8s }}
sudo chmod 777 "${HOME}/.kube"
kind create cluster --image kindest/node:${{ matrix.k8s }} --kubeconfig "${HOME}/.kube/kind-config-kind"
- name: Load images from cache
run: |
echo "loading docker images..."
gzip -dc docker.tar.gz | docker load
- name: Test
- name: Run E2E tests
env:
KIND_CLUSTER_NAME: kind
SKIP_CLUSTER_CREATION: true
SKIP_IMAGE_CREATION: true
SKIP_INGRESS_IMAGE_CREATION: true
run: |
kind get kubeconfig > $HOME/.kube/kind-config-kind
make kind-e2e-chart-tests
run: make kind-e2e-chart-tests
kubernetes:
name: Kubernetes


@@ -29,19 +29,20 @@ jobs:
- name: Checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Run Artifact Hub lint
run: |
wget https://github.com/artifacthub/hub/releases/download/v1.5.0/ah_1.5.0_linux_amd64.tar.gz
echo 'ad0e44c6ea058ab6b85dbf582e88bad9fdbc64ded0d1dd4edbac65133e5c87da *ah_1.5.0_linux_amd64.tar.gz' | shasum -c
tar -xzvf ah_1.5.0_linux_amd64.tar.gz ah
./ah lint -p charts/ingress-nginx || exit 1
rm -f ./ah ./ah_1.5.0_linux_amd64.tar.gz
- name: Set up chart-testing
uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1
- name: Run chart-testing (lint)
run: ct lint --target-branch ${{ github.ref_name }} --config ./.ct.yaml
- name: Lint chart
run: |
ct lint --config ./.ct.yaml --target-branch ${{ github.ref_name }}
curl --silent --show-error --fail --location https://github.com/artifacthub/hub/releases/download/v1.17.0/ah_1.17.0_linux_amd64.tar.gz --remote-name
echo "57c184b71a9a5c59192c2158fc08bdddca5c340fb1deeed0158383a665b38bf1 ah_1.17.0_linux_amd64.tar.gz" | shasum --check
tar xzf ah_1.17.0_linux_amd64.tar.gz ah
./ah lint --path charts/ingress-nginx
rm ah_1.17.0_linux_amd64.tar.gz ah
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
id: filter

.gitignore

@@ -1,4 +1,3 @@
helm-docs
# OSX
._*
.DS_Store


@@ -231,14 +231,12 @@ Promoting the images basically means that images, that were pushed to staging co
- tag
- digest
- [helm-docs](https://github.com/norwoodj/helm-docs) is a tool that generates the README.md for a helm-chart automatically. In the CI pipeline workflow of github actions (/.github/workflows/ci.yaml), you can see how helm-docs is used. But the CI pipeline is not designed to make commits back into the project. So we need to run helm-docs manually, and check in the resulting autogenerated README.md at the path /charts/ingress-nginx/README.md
- [helm-docs](https://github.com/norwoodj/helm-docs) is a tool that automatically generates the README.md for a Helm chart. In the GitHub Actions CI pipeline workflow (.github/workflows/ci.yaml), you can see how helm-docs is used. The CI pipeline is not designed to make commits back into the project, so we need to run helm-docs manually and commit the resulting generated README.md. You can obtain a recent version of the helm-docs binary here: https://github.com/norwoodj/helm-docs/releases.
```
GOBIN=$PWD GO111MODULE=on go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.0
./helm-docs --chart-search-root=${GITHUB_WORKSPACE}/charts
git diff --exit-code
rm -f ./helm-docs
helm-docs --chart-search-root charts
git diff charts/ingress-nginx/README.md
```
Watchout for mistakes like leaving the helm-docs executable in your clone workspace or not checking the new README.md manually etc.
Take care not to leave the helm-docs executable in your clone workspace, and do not forget to commit the new README.md.
### e. Edit the static manifests


@@ -1,314 +1,314 @@
# OpenTelemetry
Enables distributed tracing of requests served by NGINX via the OpenTelemetry Project.
Using the third-party module [opentelemetry-cpp-contrib/nginx](https://github.com/open-telemetry/opentelemetry-cpp-contrib/tree/main/instrumentation/nginx), the Ingress-Nginx Controller can configure NGINX to enable [OpenTelemetry](http://opentelemetry.io) instrumentation.
By default this feature is disabled.
Check out this demo showcasing OpenTelemetry in Ingress NGINX. The video provides an overview and
practical demonstration of how OpenTelemetry can be utilized in Ingress NGINX for observability
and monitoring purposes.
<p align="center">
<a href="https://www.youtube.com/watch?v=jpBfgJpTcfw&t=129" target="_blank" rel="noopener noreferrer">
<img src="https://img.youtube.com/vi/jpBfgJpTcfw/0.jpg" alt="Video Thumbnail" />
</a>
</p>
<p align="center">Demo: OpenTelemetry in Ingress NGINX.</p>
## Usage
To enable the instrumentation we must enable OpenTelemetry in the configuration ConfigMap:
```yaml
data:
enable-opentelemetry: "true"
```
To enable or disable instrumentation for a single Ingress, use
the `enable-opentelemetry` annotation:
```yaml
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/enable-opentelemetry: "true"
```
We must also set the host to use when uploading traces:
```yaml
otlp-collector-host: "otel-coll-collector.otel.svc"
```
NOTE: While the option is called `otlp-collector-host`, you can point it at any backend that receives OTLP over gRPC.
Next, you will need to deploy a distributed telemetry system that uses OpenTelemetry.
[opentelemetry-collector](https://github.com/open-telemetry/opentelemetry-collector), [Jaeger](https://www.jaegertracing.io/),
[Tempo](https://github.com/grafana/tempo), and [Zipkin](https://zipkin.io/)
have been tested.
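Any backend that speaks OTLP over gRPC will do. As a rough sketch — assuming the upstream opentelemetry-collector with its stock `otlp` receiver and `debug` exporter — a minimal collector pipeline listening on the default port 4317 could look like:
```yaml
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
exporters:
  # Prints received spans to stdout; swap in a Jaeger, Zipkin or OTLP
  # exporter to forward traces to a real backend.
  debug: {}
service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [debug]
```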
Other optional configuration options:
```yaml
# specifies the name to use for the server span
opentelemetry-operation-name
# sets whether or not to trust incoming telemetry spans
opentelemetry-trust-incoming-span
# specifies the port to use when uploading traces, Default: 4317
otlp-collector-port
# specifies the service name to use for any traces created, Default: nginx
otel-service-name
# The maximum queue size. After the size is reached data are dropped.
otel-max-queuesize
# The delay interval in milliseconds between two consecutive exports.
otel-schedule-delay-millis
# The maximum batch size of every export. It must be smaller or equal to maxQueueSize.
otel-max-export-batch-size
# specifies sample rate for any traces created, Default: 0.01
otel-sampler-ratio
# specifies the sampler to be used when sampling traces.
# The available samplers are: AlwaysOn, AlwaysOff, TraceIdRatioBased, Default: AlwaysOff
otel-sampler
# Uses sampler implementation which by default will take a sample if parent Activity is sampled, Default: false
otel-sampler-parent-based
```
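These options are set in the controller's ConfigMap alongside `enable-opentelemetry`. A minimal sketch — values are illustrative, and a fuller example appears in step 2 below:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  enable-opentelemetry: "true"
  otlp-collector-host: "otel-coll-collector.otel.svc"
  otel-sampler: "TraceIdRatioBased"  # sample a fixed fraction of traces
  otel-sampler-ratio: "0.25"
```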
Note that you can also set whether to trust incoming spans (global default is true) per-location using annotations like the following:
```yaml
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/opentelemetry-trust-incoming-span: "true"
```
## Examples
The following examples show how to deploy and test different distributed telemetry systems. These examples can be performed using Docker Desktop.
The [esigo/nginx-example](https://github.com/esigo/nginx-example)
GitHub repository contains an example of a simple hello service:
```mermaid
graph TB
subgraph Browser
start["http://esigo.dev/hello/nginx"]
end
subgraph app
sa[service-a]
sb[service-b]
sa --> |name: nginx| sb
sb --> |hello nginx!| sa
end
subgraph otel
otc["Otel Collector"]
end
subgraph observability
tempo["Tempo"]
grafana["Grafana"]
backend["Jaeger"]
zipkin["Zipkin"]
end
subgraph ingress-nginx
ngx[nginx]
end
subgraph ngx[nginx]
ng[nginx]
om[OpenTelemetry module]
end
subgraph Node
app
otel
observability
ingress-nginx
om --> |otlp-gRPC| otc --> |jaeger| backend
otc --> |zipkin| zipkin
otc --> |otlp-gRPC| tempo --> grafana
sa --> |otlp-gRPC| otc
sb --> |otlp-gRPC| otc
start --> ng --> sa
end
```
To install the example and collectors, run:
1. Enable Ingress addon with:
```yaml
opentelemetry:
enabled: true
image: registry.k8s.io/ingress-nginx/opentelemetry-1.25.3:v20240813-b933310d@sha256:f7604ac0547ed64d79b98d92133234e66c2c8aade3c1f4809fed5eec1fb7f922
containerSecurityContext:
allowPrivilegeEscalation: false
```
2. Enable OpenTelemetry and set the otlp-collector-host:
```bash
$ echo '
apiVersion: v1
kind: ConfigMap
data:
enable-opentelemetry: "true"
opentelemetry-config: "/etc/nginx/opentelemetry.toml"
opentelemetry-operation-name: "HTTP $request_method $service_name $uri"
opentelemetry-trust-incoming-span: "true"
otlp-collector-host: "otel-coll-collector.otel.svc"
otlp-collector-port: "4317"
otel-max-queuesize: "2048"
otel-schedule-delay-millis: "5000"
otel-max-export-batch-size: "512"
otel-service-name: "nginx-proxy" # OpenTelemetry resource name
otel-sampler: "AlwaysOn" # Also: AlwaysOff, TraceIdRatioBased
otel-sampler-ratio: "1.0"
otel-sampler-parent-based: "false"
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
' | kubectl replace -f -
```
3. Deploy otel-collector, Grafana and Jaeger backend:
```bash
# add helm charts needed for grafana and OpenTelemetry collector
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
# deploy cert-manager needed for OpenTelemetry collector operator
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.yaml
# create observability namespace
kubectl apply -f https://raw.githubusercontent.com/esigo/nginx-example/main/observability/namespace.yaml
# install OpenTelemetry collector operator
helm upgrade --install otel-collector-operator -n otel --create-namespace open-telemetry/opentelemetry-operator
# deploy OpenTelemetry collector
kubectl apply -f https://raw.githubusercontent.com/esigo/nginx-example/main/observability/collector.yaml
# deploy Jaeger all-in-one
kubectl apply -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.37.0/jaeger-operator.yaml -n observability
kubectl apply -f https://raw.githubusercontent.com/esigo/nginx-example/main/observability/jaeger.yaml -n observability
# deploy zipkin
kubectl apply -f https://raw.githubusercontent.com/esigo/nginx-example/main/observability/zipkin.yaml -n observability
# deploy tempo and grafana
helm upgrade --install tempo grafana/tempo --create-namespace -n observability
helm upgrade -f https://raw.githubusercontent.com/esigo/nginx-example/main/observability/grafana/grafana-values.yaml --install grafana grafana/grafana --create-namespace -n observability
```
4. Build and deploy the demo app:
```bash
# build images
make images
# deploy demo app:
make deploy-app
```
5. Make a few requests to the Service:
```bash
kubectl port-forward --namespace=ingress-nginx service/ingress-nginx-controller 8090:80
curl http://esigo.dev:8090/hello/nginx
StatusCode : 200
StatusDescription : OK
Content : {"v":"hello nginx!"}
RawContent : HTTP/1.1 200 OK
Connection: keep-alive
Content-Length: 21
Content-Type: text/plain; charset=utf-8
Date: Mon, 10 Oct 2022 17:43:33 GMT
{"v":"hello nginx!"}
Forms : {}
Headers : {[Connection, keep-alive], [Content-Length, 21], [Content-Type, text/plain; charset=utf-8], [Date,
Mon, 10 Oct 2022 17:43:33 GMT]}
Images : {}
InputFields : {}
Links : {}
ParsedHtml : System.__ComObject
RawContentLength : 21
```
6. View the Grafana UI:
```bash
kubectl port-forward --namespace=observability service/grafana 3000:80
```
In the Grafana interface we can see the details:
![grafana screenshot](../../images/otel-grafana-demo.png "grafana screenshot")
7. View the Jaeger UI:
```bash
kubectl port-forward --namespace=observability service/jaeger-all-in-one-query 16686:16686
```
In the Jaeger interface we can see the details:
![Jaeger screenshot](../../images/otel-jaeger-demo.png "Jaeger screenshot")
8. View the Zipkin UI:
```bash
kubectl port-forward --namespace=observability service/zipkin 9411:9411
```
In the Zipkin interface we can see the details:
![zipkin screenshot](../../images/otel-zipkin-demo.png "zipkin screenshot")
## Migration from OpenTracing, Jaeger, Zipkin and Datadog
If you are migrating from OpenTracing, Jaeger, Zipkin, or Datadog to OpenTelemetry,
you may need to update various annotations and configurations. Here are the mappings
for common annotations and configurations, each table followed by a short example:
### Annotations
| Legacy | OpenTelemetry |
|---------------------------------------------------------------|-----------------------------------------------------------------|
| `nginx.ingress.kubernetes.io/enable-opentracing` | `nginx.ingress.kubernetes.io/enable-opentelemetry` |
| `nginx.ingress.kubernetes.io/opentracing-trust-incoming-span` | `nginx.ingress.kubernetes.io/opentelemetry-trust-incoming-span` |
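For illustration, migrating the per-Ingress annotations is a straight rename; the Ingress metadata below is hypothetical:
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hello
  annotations:
    # Before: nginx.ingress.kubernetes.io/enable-opentracing: "true"
    nginx.ingress.kubernetes.io/enable-opentelemetry: "true"
    # Before: nginx.ingress.kubernetes.io/opentracing-trust-incoming-span: "true"
    nginx.ingress.kubernetes.io/opentelemetry-trust-incoming-span: "true"
```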
### Configs
| Legacy | OpenTelemetry |
|---------------------------------------|----------------------------------------------|
| `opentracing-operation-name` | `opentelemetry-operation-name` |
| `opentracing-location-operation-name` | `opentelemetry-operation-name` |
| `opentracing-trust-incoming-span` | `opentelemetry-trust-incoming-span` |
| `zipkin-collector-port` | `otlp-collector-port` |
| `zipkin-service-name` | `otel-service-name` |
| `zipkin-sample-rate` | `otel-sampler-ratio` |
| `jaeger-collector-port` | `otlp-collector-port` |
| `jaeger-endpoint` | `otlp-collector-port`, `otlp-collector-host` |
| `jaeger-service-name` | `otel-service-name` |
| `jaeger-propagation-format` | `N/A` |
| `jaeger-sampler-type` | `otel-sampler` |
| `jaeger-sampler-param` | `otel-sampler` |
| `jaeger-sampler-host` | `N/A` |
| `jaeger-sampler-port` | `N/A` |
| `jaeger-trace-context-header-name` | `N/A` |
| `jaeger-debug-header` | `N/A` |
| `jaeger-baggage-header` | `N/A` |
| `jaeger-tracer-baggage-header-prefix` | `N/A` |
| `datadog-collector-port` | `otlp-collector-port` |
| `datadog-service-name` | `otel-service-name` |
| `datadog-environment` | `N/A` |
| `datadog-operation-name-override` | `N/A` |
| `datadog-priority-sampling` | `otel-sampler` |
| `datadog-sample-rate` | `otel-sampler-ratio` |
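As a sketch of the config migration, a controller ConfigMap that used the Zipkin keys would map over like this (hostnames and values are illustrative):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  # Before: enable-opentracing, zipkin-collector-port, zipkin-service-name,
  # zipkin-sample-rate.
  enable-opentelemetry: "true"
  otlp-collector-host: "otel-coll-collector.otel.svc"
  otlp-collector-port: "4317"
  otel-service-name: "nginx"
  otel-sampler: "TraceIdRatioBased"
  otel-sampler-ratio: "0.5"
```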


@@ -170,7 +170,7 @@ func runHelmDocs() error {
if err != nil {
return err
}
err = sh.RunV("helm-docs", "--chart-search-root=${PWD}/charts")
err = sh.RunV("helm-docs", "--chart-search-root", "${PWD}/charts")
if err != nil {
return err
}
@@ -181,7 +181,7 @@ func installHelmDocs() error {
utils.Info("HELM Install HelmDocs")
g0 := sh.RunCmd("go")
err := g0("install", "github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.0")
err := g0("install", "github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.3")
if err != nil {
return err
}


@@ -91,25 +91,31 @@ echo "[dev-env] copying docker images to cluster..."
kind load docker-image --name="${KIND_CLUSTER_NAME}" --nodes=${KIND_WORKERS} ${REGISTRY}/controller:${TAG}
if [ "${SKIP_CERT_MANAGER_CREATION:-false}" = "false" ]; then
curl -fsSL -o cmctl.tar.gz https://github.com/cert-manager/cert-manager/releases/download/v1.11.1/cmctl-linux-amd64.tar.gz
tar xzf cmctl.tar.gz
chmod +x cmctl
./cmctl help
echo "[dev-env] apply cert-manager ..."
kubectl apply --wait -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
kubectl wait --timeout=30s --for=condition=available deployment/cert-manager -n cert-manager
kubectl get validatingwebhookconfigurations cert-manager-webhook -ojson | jq '.webhooks[].clientConfig'
kubectl get endpoints -n cert-manager cert-manager-webhook
./cmctl check api --wait=2m
echo "[dev-env] deploying cert-manager..."
# Get OS and platform for downloading cmctl.
os="$(uname -o | tr "[:upper:]" "[:lower:]")"
platform="$(uname -m | sed "s/aarch64/arm64/;s/x86_64/amd64/")"
# Download cmctl. Cannot validate checksum since OS and platform may vary.
curl --silent --show-error --fail --location "https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cmctl-${os}-${platform}.tar.gz" | tar xz cmctl
# Deploy cert-manager.
kubectl create --filename https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.yaml
kubectl rollout status deployment --namespace cert-manager cert-manager --timeout 30s
./cmctl check api --wait 2m
fi
echo "[dev-env] running helm chart e2e tests..."
docker run --rm --interactive --network host \
--name ct \
--volume $KUBECONFIG:/root/.kube/config \
--volume "${DIR}/../../":/workdir \
--workdir /workdir \
registry.k8s.io/ingress-nginx/e2e-test-runner:v20240829-2c421762@sha256:5b7809bfe9cbd9cd6bcb8033ca27576ca704f05ce729fe4dcb574810f7a25785 \
ct install \
--charts charts/ingress-nginx \
--helm-extra-args "--timeout 60s"
docker run \
--name ct \
--volume "${KUBECONFIG}:/root/.kube/config:ro" \
--volume "${DIR}/../../:/workdir" \
--network host \
--workdir /workdir \
--entrypoint ct \
--rm \
registry.k8s.io/ingress-nginx/e2e-test-runner:v20240829-2c421762@sha256:5b7809bfe9cbd9cd6bcb8033ca27576ca704f05ce729fe4dcb574810f7a25785 \
install \
--charts charts/ingress-nginx \
--helm-extra-args "--timeout 60s"