Merge branch 'main' into topology

Eric Miller committed on 2021-11-22 15:29:50 -06:00 (commit b67a874090)
31 changed files with 441 additions and 245 deletions

View file

@@ -94,7 +94,7 @@ workflows:
           - bats-unit-test
           filters:
             branches:
-              only: master
+              only: main
   update-helm-charts-index:
     jobs:
       - update-helm-charts-index:

View file

@@ -13,21 +13,6 @@ jobs:
     runs-on: ubuntu-latest
     name: Jira sync
     steps:
-      - name: Check if community user
-        if: github.event.action == 'opened'
-        id: vault-team-role
-        run: |
-          TEAM=vault
-          ROLE="$(hub api orgs/hashicorp/teams/${TEAM}/memberships/${{ github.actor }} | jq -r '.role | select(.!=null)')"
-          if [[ -n ${ROLE} ]]; then
-            echo "Actor ${{ github.actor }} is a ${TEAM} team member, skipping ticket creation"
-          else
-            echo "Actor ${{ github.actor }} is not a ${TEAM} team member"
-          fi
-          echo "::set-output name=role::${ROLE}"
-        env:
-          GITHUB_TOKEN: ${{ secrets.JIRA_SYNC_GITHUB_TOKEN }}
       - name: Login
         uses: atlassian/gajira-login@v2.0.0
         env:
@@ -46,7 +31,7 @@ jobs:
          fi
       - name: Create ticket
-        if: github.event.action == 'opened' && !steps.vault-team-role.outputs.role
+        if: github.event.action == 'opened'
         uses: tomhjp/gh-action-jira-create@v0.2.0
         with:
           project: VAULT
@@ -63,7 +48,7 @@ jobs:
         uses: tomhjp/gh-action-jira-search@v0.2.1
         with:
           # cf[10089] is Issue Link custom field
-          jql: 'project = "VAULT" and issuetype = "GH Issue" and cf[10089]="${{ github.event.issue.html_url || github.event.pull_request.html_url }}"'
+          jql: 'project = "VAULT" and cf[10089]="${{ github.event.issue.html_url || github.event.pull_request.html_url }}"'
       - name: Sync comment
         if: github.event.action == 'created' && steps.search.outputs.issue
@@ -77,11 +62,11 @@ jobs:
         uses: atlassian/gajira-transition@v2.0.1
         with:
           issue: ${{ steps.search.outputs.issue }}
-          transition: Done
+          transition: Close
       - name: Reopen ticket
         if: github.event.action == 'reopened' && steps.search.outputs.issue
         uses: atlassian/gajira-transition@v2.0.1
         with:
           issue: ${{ steps.search.outputs.issue }}
-          transition: "To Do"
+          transition: "Pending Triage"

View file

@@ -1,5 +1,43 @@
 ## Unreleased
 
+## 0.18.0 (November 17th, 2021)
+
+CHANGES:
+* Removed support for deploying a leader-elector container with the [vault-k8s injector](https://github.com/hashicorp/vault-k8s) since vault-k8s now uses an internal mechanism to determine leadership [GH-649](https://github.com/hashicorp/vault-helm/pull/649)
+* Vault image default 1.9.0
+* Vault K8s image default 0.14.1
+
+Improvements:
+* Added templateConfig.staticSecretRenderInterval chart option for the injector [GH-621](https://github.com/hashicorp/vault-helm/pull/621)
+
+## 0.17.1 (October 25th, 2021)
+
+Improvements:
+* Add option for Ingress PathType [GH-634](https://github.com/hashicorp/vault-helm/pull/634)
+
+## 0.17.0 (October 21st, 2021)
+
+KNOWN ISSUES:
+* The chart will fail to deploy on Kubernetes 1.19+ with `server.ingress.enabled=true` because no `pathType` is set
+
+CHANGES:
+* Vault image default 1.8.4
+* Vault K8s image default 0.14.0
+
+Improvements:
+* Support Ingress stable networking API [GH-590](https://github.com/hashicorp/vault-helm/pull/590)
+* Support setting the `externalTrafficPolicy` for `LoadBalancer` and `NodePort` service types [GH-626](https://github.com/hashicorp/vault-helm/pull/626)
+* Support setting ingressClassName on server Ingress [GH-630](https://github.com/hashicorp/vault-helm/pull/630)
+
+Bugs:
+* Ensure `kubeletRootDir` volume path and mounts are the same when `csi.daemonSet.kubeletRootDir` is overridden [GH-628](https://github.com/hashicorp/vault-helm/pull/628)
+
+## 0.16.1 (September 29th, 2021)
+
+CHANGES:
+* Vault image default 1.8.3
+* Vault K8s image default 0.13.1
+
 ## 0.16.0 (September 16th, 2021)
 
 CHANGES:
@@ -18,7 +56,7 @@ Improvements:
 ## 0.14.0 (July 28th, 2021)
 
 Features:
-* Added templateConfig.exitOnRetryFailure annotation for the injector [GH-560](https://github.com/hashicorp/vault-helm/pull/560)
+* Added templateConfig.exitOnRetryFailure chart option for the injector [GH-560](https://github.com/hashicorp/vault-helm/pull/560)
 
 Improvements:
 * Support configuring pod tolerations, pod affinity, and node selectors as YAML [GH-565](https://github.com/hashicorp/vault-helm/pull/565)
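
As a usage note for the `templateConfig.staticSecretRenderInterval` option called out above, here is a minimal sketch of setting it at install time (release name and namespace are illustrative):

    # Hypothetical install overriding the injector's static-secret re-render interval.
    helm upgrade --install vault hashicorp/vault \
      --namespace vault \
      --set 'injector.agentDefaults.templateConfig.staticSecretRenderInterval=30s'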

View file

@@ -26,7 +26,7 @@ quickly merge or address your contributions.
 * Make sure you test against the latest released version. It is possible
   we already fixed the bug you're experiencing. Even better is if you can test
-  against `master`, as bugs are fixed regularly but new versions are only
+  against `main`, as bugs are fixed regularly but new versions are only
   released every few months.
 * Provide steps to reproduce the issue, and if possible include the expected
@@ -121,7 +121,7 @@ may not be properly cleaned up. We recommend recycling the Kubernetes cluster to
 start from a clean slate.
 
 **Note:** There is a Terraform configuration in the
-[`test/terraform/`](https://github.com/hashicorp/vault-helm/tree/master/test/terraform) directory
+[`test/terraform/`](https://github.com/hashicorp/vault-helm/tree/main/test/terraform) directory
 that can be used to quickly bring up a GKE cluster and configure
 `kubectl` and `helm` locally. This can be used to quickly spin up a test
 cluster for acceptance tests. Unit tests _do not_ require a running Kubernetes

View file

@@ -1,7 +1,7 @@
 apiVersion: v2
 name: vault
-version: 0.16.0
-appVersion: 1.8.2
+version: 0.18.0
+appVersion: 1.9.0
 kubeVersion: ">= 1.14.0-0"
 description: Official HashiCorp Vault Chart
 home: https://www.vaultproject.io

View file

@@ -40,6 +40,7 @@ else
 		-e GOOGLE_CREDENTIALS=${GOOGLE_CREDENTIALS} \
 		-e CLOUDSDK_CORE_PROJECT=${CLOUDSDK_CORE_PROJECT} \
 		-e KUBECONFIG=/helm-test/.kube/config \
+		-e VAULT_LICENSE_CI=${VAULT_LICENSE_CI} \
 		-w /helm-test \
 		$(TEST_IMAGE) \
 		make acceptance
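
Since the Makefile now forwards `VAULT_LICENSE_CI` into the test container, the variable has to exist in the calling shell; a sketch, assuming the license sits in a local file and the enclosing acceptance target is invoked as in the hunk above:

    # Export a valid Vault Enterprise license so the container-side
    # `make acceptance` run can consume it (the file path is illustrative).
    export VAULT_LICENSE_CI="$(cat ./vault.hclic)"
    make acceptance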

View file

@@ -686,3 +686,38 @@ imagePullSecrets:
 {{- end -}}
 {{- end -}}
 {{- end -}}
+
+{{/*
+externalTrafficPolicy sets a Service's externalTrafficPolicy if applicable.
+Supported inputs are Values.server.service and Values.ui
+*/}}
+{{- define "service.externalTrafficPolicy" -}}
+{{- $type := "" -}}
+{{- if .serviceType -}}
+{{- $type = .serviceType -}}
+{{- else if .type -}}
+{{- $type = .type -}}
+{{- end -}}
+{{- if and .externalTrafficPolicy (or (eq $type "LoadBalancer") (eq $type "NodePort")) }}
+  externalTrafficPolicy: {{ .externalTrafficPolicy }}
+{{- else }}
+{{- end }}
+{{- end -}}
+
+{{/*
+loadBalancer configuration for the UI service.
+Supported inputs are Values.ui
+*/}}
+{{- define "service.loadBalancer" -}}
+{{- if eq (.serviceType | toString) "LoadBalancer" }}
+{{- if .loadBalancerIP }}
+  loadBalancerIP: {{ .loadBalancerIP }}
+{{- end }}
+{{- with .loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range . }}
+  - {{ . }}
+  {{- end }}
+{{- end -}}
+{{- end }}
+{{- end -}}
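
A quick way to see the new `service.externalTrafficPolicy` helper in action is to render a server service from the chart root; a sketch mirroring the unit tests added later in this commit:

    # The field should render only for LoadBalancer/NodePort service types.
    helm template . \
      --show-only templates/server-service.yaml \
      --set 'server.service.type=NodePort' \
      --set 'server.service.externalTrafficPolicy=Local' \
      | grep externalTrafficPolicy   # expect: externalTrafficPolicy: Local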

View file

@@ -44,7 +44,7 @@ spec:
             - name: providervol
               mountPath: "/provider"
             - name: mountpoint-dir
-              mountPath: /var/lib/kubelet/pods
+              mountPath: {{ .Values.csi.daemonSet.kubeletRootDir }}/pods
               mountPropagation: HostToContainer
           {{- if .Values.csi.volumeMounts }}
             {{- toYaml .Values.csi.volumeMounts | nindent 12}}
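
With the mount path now driven by `csi.daemonSet.kubeletRootDir`, clusters whose kubelet state lives outside /var/lib/kubelet can keep the volume and mount consistent; a sketch (the k3s path is illustrative):

    # Point the CSI provider's mountpoint-dir at a custom kubelet root.
    helm install vault hashicorp/vault \
      --set 'csi.enabled=true' \
      --set 'csi.daemonSet.kubeletRootDir=/var/lib/rancher/k3s/agent/kubelet'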

View file

@@ -110,6 +110,10 @@ spec:
             value: "{{ .Values.injector.agentDefaults.template }}"
           - name: AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE
             value: "{{ .Values.injector.agentDefaults.templateConfig.exitOnRetryFailure }}"
+          {{- if .Values.injector.agentDefaults.templateConfig.staticSecretRenderInterval }}
+          - name: AGENT_INJECT_TEMPLATE_STATIC_SECRET_RENDER_INTERVAL
+            value: "{{ .Values.injector.agentDefaults.templateConfig.staticSecretRenderInterval }}"
+          {{- end }}
           {{- include "vault.extraEnvironmentVars" .Values.injector | nindent 12 }}
           - name: POD_NAME
             valueFrom:
@@ -138,35 +142,6 @@ spec:
             periodSeconds: 2
             successThreshold: 1
             timeoutSeconds: 5
-        {{- if and (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) (eq (.Values.injector.leaderElector.useContainer | toString) "true") }}
-        - name: leader-elector
-          image: {{ .Values.injector.leaderElector.image.repository }}:{{ .Values.injector.leaderElector.image.tag }}
-          args:
-            - --election={{ template "vault.fullname" . }}-agent-injector-leader
-            - --election-namespace={{ .Release.Namespace }}
-            - --http=0.0.0.0:4040
-            - --ttl={{ .Values.injector.leaderElector.ttl }}
-          livenessProbe:
-            httpGet:
-              path: /
-              port: 4040
-              scheme: HTTP
-            failureThreshold: 2
-            initialDelaySeconds: 5
-            periodSeconds: 2
-            successThreshold: 1
-            timeoutSeconds: 5
-          readinessProbe:
-            httpGet:
-              path: /
-              port: 4040
-              scheme: HTTP
-            failureThreshold: 2
-            initialDelaySeconds: 5
-            periodSeconds: 2
-            successThreshold: 1
-            timeoutSeconds: 5
-        {{- end }}
         {{- if .Values.injector.certs.secretName }}
           volumeMounts:
             - name: webhook-certs

View file

@@ -1,14 +0,0 @@
-{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") (eq (.Values.injector.leaderElector.enabled | toString) "true") (gt (.Values.injector.replicas | int) 1) (eq (.Values.injector.leaderElector.useContainer | toString) "true")}}
-# This is created here so it can be cleaned up easily, since if
-# the endpoint is left around the leader won't expire for about a minute.
-apiVersion: v1
-kind: Endpoints
-metadata:
-  name: {{ template "vault.fullname" . }}-agent-injector-leader
-  annotations:
-    deprecated: "true"
-  labels:
-    app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}

View file

@@ -9,7 +9,7 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service }}
 rules:
 - apiGroups: [""]
-  resources: ["secrets", "configmaps", "endpoints"]
+  resources: ["secrets", "configmaps"]
   verbs:
   - "create"
   - "get"

View file

@@ -21,6 +21,7 @@ spec:
   {{- if .Values.server.service.clusterIP }}
   clusterIP: {{ .Values.server.service.clusterIP }}
   {{- end }}
+  {{- include "service.externalTrafficPolicy" .Values.server.service }}
   publishNotReadyAddresses: true
   ports:
     - name: {{ include "vault.scheme" . }}

View file

@@ -21,6 +21,7 @@ spec:
   {{- if .Values.server.service.clusterIP }}
   clusterIP: {{ .Values.server.service.clusterIP }}
   {{- end }}
+  {{- include "service.externalTrafficPolicy" .Values.server.service }}
   publishNotReadyAddresses: true
   ports:
     - name: {{ include "vault.scheme" . }}
@@ -38,4 +39,4 @@ spec:
     component: server
     vault-active: "false"
 {{- end }}
 {{- end }}

View file

@@ -8,7 +8,11 @@
 {{- $serviceName = printf "%s-%s" $serviceName "active" -}}
 {{- end }}
 {{- $servicePort := .Values.server.service.port -}}
-{{ if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
+{{- $pathType := .Values.server.ingress.pathType -}}
+{{- $kubeVersion := .Capabilities.KubeVersion.Version }}
+{{ if semverCompare ">= 1.19.0-0" $kubeVersion }}
+apiVersion: networking.k8s.io/v1
+{{ else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
 apiVersion: networking.k8s.io/v1beta1
 {{ else }}
 apiVersion: extensions/v1beta1
@@ -36,6 +40,9 @@ spec:
       {{- end }}
       secretName: {{ .secretName }}
   {{- end }}
+  {{- end }}
+  {{- if .Values.server.ingress.ingressClassName }}
+  ingressClassName: {{ .Values.server.ingress.ingressClassName }}
   {{- end }}
   rules:
   {{- range .Values.server.ingress.hosts }}
@@ -47,9 +54,19 @@ spec:
   {{- end }}
     {{- range (.paths | default (list "/")) }}
       - path: {{ . }}
+        {{ if semverCompare ">= 1.19.0-0" $kubeVersion }}
+        pathType: {{ $pathType }}
+        {{ end }}
         backend:
+          {{ if semverCompare ">= 1.19.0-0" $kubeVersion }}
+          service:
+            name: {{ $serviceName }}
+            port:
+              number: {{ $servicePort }}
+          {{ else }}
           serviceName: {{ $serviceName }}
           servicePort: {{ $servicePort }}
+          {{ end }}
     {{- end }}
   {{- end }}
 {{- end }}
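
The version switch above can be exercised with `helm template --kube-version`, as the updated unit tests later in this commit do; a sketch:

    # Expect networking.k8s.io/v1 plus pathType on 1.19+, v1beta1 fields otherwise.
    helm template . \
      --show-only templates/server-ingress.yaml \
      --set 'server.ingress.enabled=true' \
      --kube-version 1.19.0 | grep -E 'apiVersion|pathType'
    helm template . \
      --show-only templates/server-ingress.yaml \
      --set 'server.ingress.enabled=true' \
      --kube-version 1.18.3 | grep apiVersion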

View file

@@ -21,6 +21,7 @@ spec:
   {{- if .Values.server.service.clusterIP }}
   clusterIP: {{ .Values.server.service.clusterIP }}
   {{- end }}
+  {{- include "service.externalTrafficPolicy" .Values.server.service }}
   # We want the servers to become available even if they're not ready
   # since this DNS is also used for join operations.
   publishNotReadyAddresses: true

View file

@@ -30,16 +30,8 @@ spec:
       nodePort: {{ .Values.ui.serviceNodePort }}
   {{- end }}
   type: {{ .Values.ui.serviceType }}
-  {{- if and (eq (.Values.ui.serviceType | toString) "LoadBalancer") (.Values.ui.loadBalancerSourceRanges) }}
-  loadBalancerSourceRanges:
-  {{- range $cidr := .Values.ui.loadBalancerSourceRanges }}
-  - {{ $cidr }}
-  {{- end }}
-  {{- end }}
-  {{- if and (eq (.Values.ui.serviceType | toString) "LoadBalancer") (.Values.ui.loadBalancerIP) }}
-  loadBalancerIP: {{ .Values.ui.loadBalancerIP }}
-  {{- end }}
+  {{- include "service.externalTrafficPolicy" .Values.ui }}
+  {{- include "service.loadBalancer" .Values.ui }}
 {{- end -}}
 {{- end }}
 {{- end }}
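
The switch to the shared helpers is meant to be behavior-preserving for existing UI values; a quick render check (values illustrative):

    # loadBalancerIP and source ranges should still appear for a LoadBalancer UI service.
    helm template . \
      --show-only templates/ui-service.yaml \
      --set 'ui.enabled=true' \
      --set 'ui.serviceType=LoadBalancer' \
      --set 'ui.loadBalancerIP=10.0.0.5' \
      --set 'ui.loadBalancerSourceRanges={10.0.0.0/16}' \
      | grep -A2 loadBalancer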

View file

@@ -4,6 +4,8 @@
 The Makefile at the top level of this repo contains a few targets that should help with running acceptance tests in your own GKE instance or in a kind cluster.
 
+Note that for the Vault Enterprise tests to pass, a `VAULT_LICENSE_CI` environment variable needs to be set to the contents of a valid Vault Enterprise license.
+
 ### Running in a GKE cluster
 
 * Set the `GOOGLE_CREDENTIALS` and `CLOUDSDK_CORE_PROJECT` variables at the top of the file. `GOOGLE_CREDENTIALS` should contain the local path to your Google Cloud Platform account credentials in JSON format. `CLOUDSDK_CORE_PROJECT` should be set to the ID of your GCP project.

View file

@@ -12,8 +12,7 @@ load _helpers
     helm install "$(name_prefix)" \
       --wait \
       --timeout=5m \
-      --set="injector.replicas=3" \
-      --set="injector.leaderElector.useContainer=true" .
+      --set="injector.replicas=3" .
 
     kubectl wait --for condition=Ready pod -l app.kubernetes.io/name=vault-agent-injector --timeout=5m
     pods=($(kubectl get pods -l app.kubernetes.io/name=vault-agent-injector -o json | jq -r '.items[] | .metadata.name'))
@@ -23,21 +22,15 @@ load _helpers
     tries=0
     until [ $tries -ge 60 ]
     do
-        ## The new internal leader mechanism uses a ConfigMap
         owner=$(kubectl get configmaps vault-k8s-leader -o json | jq -r .metadata.ownerReferences\[0\].name)
         leader=$(kubectl get pods $owner -o json | jq -r .metadata.name)
         [ -n "${leader}" ] && [ "${leader}" != "null" ] && break
-
-        ## Also check the old leader-elector container
-        old_leader="$(echo "$(kubectl exec ${pods[0]} -c sidecar-injector -- wget --quiet --output-document - localhost:4040)" | jq -r .name)"
-        [ -n "${old_leader}" ] && break
-
         ((++tries))
         sleep .5
     done
 
     # Check the leader name is valid - i.e. one of the 3 pods
-    [[ " ${pods[@]} " =~ " ${leader} " || " ${pods[@]} " =~ " ${old_leader} " ]]
+    [[ " ${pods[@]} " =~ " ${leader} " ]]
 }

View file

@@ -7,7 +7,7 @@ load _helpers
     helm install "$(name_prefix)-east" \
       --set='server.image.repository=hashicorp/vault-enterprise' \
-      --set='server.image.tag=1.8.2_ent' \
+      --set='server.image.tag=1.9.0_ent' \
       --set='injector.enabled=false' \
       --set='server.ha.enabled=true' \
       --set='server.ha.raft.enabled=true' \
@@ -77,7 +77,7 @@ load _helpers
     helm install "$(name_prefix)-west" \
       --set='injector.enabled=false' \
       --set='server.image.repository=hashicorp/vault-enterprise' \
-      --set='server.image.tag=1.8.2_ent' \
+      --set='server.image.tag=1.9.0_ent' \
       --set='server.ha.enabled=true' \
       --set='server.ha.raft.enabled=true' \
       --set='server.enterpriseLicense.secretName=vault-license' .

View file

@@ -8,7 +8,7 @@ load _helpers
     helm install "$(name_prefix)-east" \
       --set='injector.enabled=false' \
       --set='server.image.repository=hashicorp/vault-enterprise' \
-      --set='server.image.tag=1.8.2_ent' \
+      --set='server.image.tag=1.9.0_ent' \
       --set='server.ha.enabled=true' \
       --set='server.ha.raft.enabled=true' \
       --set='server.enterpriseLicense.secretName=vault-license' .
@@ -77,7 +77,7 @@ load _helpers
     helm install "$(name_prefix)-west" \
       --set='injector.enabled=false' \
       --set='server.image.repository=hashicorp/vault-enterprise' \
-      --set='server.image.tag=1.8.2_ent' \
+      --set='server.image.tag=1.9.0_ent' \
       --set='server.ha.enabled=true' \
       --set='server.ha.raft.enabled=true' \
       --set='server.enterpriseLicense.secretName=vault-license' .

View file

@@ -8,7 +8,7 @@ resource "random_id" "suffix" {
 data "google_container_engine_versions" "main" {
   location       = "${var.zone}"
-  version_prefix = "1.18."
+  version_prefix = "1.19."
 }
 
 data "google_service_account" "gcpapi" {

View file

@@ -717,3 +717,28 @@ load _helpers
       yq -r 'map(select(.name=="AGENT_INJECT_TEMPLATE_CONFIG_EXIT_ON_RETRY_FAILURE")) | .[] .value' | tee /dev/stderr)
   [ "${value}" = "false" ]
 }
+
+@test "injector/deployment: agent default template_config.static_secret_render_interval" {
+  cd `chart_dir`
+  local object=$(helm template \
+      --show-only templates/injector-deployment.yaml \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
+
+  local value=$(echo $object |
+      yq -r 'map(select(.name=="AGENT_INJECT_TEMPLATE_STATIC_SECRET_RENDER_INTERVAL")) | .[] .value' | tee /dev/stderr)
+  [ "${value}" = "" ]
+}
+
+@test "injector/deployment: can set agent template_config.static_secret_render_interval" {
+  cd `chart_dir`
+  local object=$(helm template \
+      --show-only templates/injector-deployment.yaml \
+      --set='injector.agentDefaults.templateConfig.staticSecretRenderInterval=1m' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
+
+  local value=$(echo $object |
+      yq -r 'map(select(.name=="AGENT_INJECT_TEMPLATE_STATIC_SECRET_RENDER_INTERVAL")) | .[] .value' | tee /dev/stderr)
+  [ "${value}" = "1m" ]
+}
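
These are pure template tests, so they should be runnable locally with helm, bats, and yq from the chart root; a sketch (the file path is assumed from the repo's test layout):

    # Run only the injector deployment unit tests.
    bats ./test/unit/injector-deployment.bats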

View file

@@ -166,108 +166,3 @@ load _helpers
       yq 'length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
-
-#--------------------------------------------------------------------
-# Old leader-elector container support
-# Note: deprecated and will be removed soon
-
-@test "injector/deployment: leader elector - sidecar is created only when enabled" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      --show-only templates/injector-deployment.yaml \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers | length' | tee /dev/stderr)
-  [ "${actual}" = "1" ]
-
-  local actual=$(helm template \
-      --show-only templates/injector-deployment.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.leaderElector.enabled=false" \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers | length' | tee /dev/stderr)
-  [ "${actual}" = "1" ]
-
-  local actual=$(helm template \
-      --show-only templates/injector-deployment.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.leaderElector.useContainer=true" \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers | length' | tee /dev/stderr)
-  [ "${actual}" = "2" ]
-}
-
-@test "injector/deployment: leader elector image name is configurable" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      --show-only templates/injector-deployment.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.leaderElector.useContainer=true" \
-      --set "injector.leaderElector.image.repository=SomeOtherImage" \
-      --set "injector.leaderElector.image.tag=SomeOtherTag" \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].image' | tee /dev/stderr)
-  [ "${actual}" = "SomeOtherImage:SomeOtherTag" ]
-}
-
-@test "injector/deployment: leader elector TTL is configurable" {
-  cd `chart_dir`
-  # Default value 60s
-  local actual=$(helm template \
-      --show-only templates/injector-deployment.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.leaderElector.useContainer=true" \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].args[3]' | tee /dev/stderr)
-  [ "${actual}" = "--ttl=60s" ]
-
-  # Configured to 30s
-  local actual=$(helm template \
-      --show-only templates/injector-deployment.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.leaderElector.useContainer=true" \
-      --set "injector.leaderElector.ttl=30s" \
-      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[1].args[3]' | tee /dev/stderr)
-  [ "${actual}" = "--ttl=30s" ]
-}
-
-@test "injector/leader-endpoint: created/skipped as appropriate" {
-  cd `chart_dir`
-  local actual=$( (helm template \
-      --show-only templates/injector-leader-endpoint.yaml \
-      . || echo "---") | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
-
-  local actual=$( (helm template \
-      --show-only templates/injector-leader-endpoint.yaml \
-      --set "injector.replicas=2" \
-      --set "global.enabled=false" \
-      . || echo "---") | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
-
-  local actual=$( (helm template \
-      --show-only templates/injector-leader-endpoint.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.enabled=false" \
-      . || echo "---") | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
-
-  local actual=$( (helm template \
-      --show-only templates/injector-leader-endpoint.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.leaderElector.enabled=false" \
-      . || echo "---") | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
-
-  local actual=$( (helm template \
-      --show-only templates/injector-leader-endpoint.yaml \
-      --set "injector.replicas=2" \
-      --set "injector.leaderElector.useContainer=true" \
-      . || echo "---") | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}

View file

@@ -157,3 +157,43 @@ load _helpers
       yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr)
   [ "${actual}" = "https" ]
 }
+
+# duplicated in server-service.bats
+@test "server/ha-active-Service: NodePort assert externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ha-active-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=NodePort' \
+      --set 'server.service.externalTrafficPolicy=Foo' \
+      . | tee /dev/stderr |
+      yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "Foo" ]
+}
+
+# duplicated in server-service.bats
+@test "server/ha-active-Service: NodePort assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ha-active-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=NodePort' \
+      --set 'server.service.externalTrafficPolicy=' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}
+
+# duplicated in server-service.bats
+@test "server/ha-active-Service: ClusterIP assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ha-active-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=ClusterIP' \
+      --set 'server.service.externalTrafficPolicy=Foo' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}

View file

@@ -168,3 +168,43 @@ load _helpers
       yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr)
   [ "${actual}" = "https" ]
 }
+
+# duplicated in server-service.bats
+@test "server/ha-standby-Service: NodePort assert externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ha-standby-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=NodePort' \
+      --set 'server.service.externalTrafficPolicy=Foo' \
+      . | tee /dev/stderr |
+      yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "Foo" ]
+}
+
+# duplicated in server-service.bats
+@test "server/ha-standby-Service: NodePort assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ha-standby-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=NodePort' \
+      --set 'server.service.externalTrafficPolicy=' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}
+
+# duplicated in server-service.bats
+@test "server/ha-standby-Service: ClusterIP assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ha-standby-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=ClusterIP' \
+      --set 'server.service.externalTrafficPolicy=Foo' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}

View file

@@ -52,7 +52,7 @@ load _helpers
       --set 'server.ingress.hosts[0].host=test.com' \
       --set 'server.ingress.hosts[0].paths[0]=/' \
       . | tee /dev/stderr |
-      yq -r '.spec.rules[0].http.paths[0].backend.serviceName | length > 0' | tee /dev/stderr)
+      yq -r '.spec.rules[0].http.paths[0].backend.service.name | length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
@@ -66,9 +66,9 @@ load _helpers
       --set 'server.ingress.hosts[0].host=test.com' \
       --set 'server.ingress.hosts[0].paths[0]=/' \
       --set 'server.ingress.extraPaths[0].path=/annotation-service' \
-      --set 'server.ingress.extraPaths[0].backend.serviceName=ssl-redirect' \
+      --set 'server.ingress.extraPaths[0].backend.service.name=ssl-redirect' \
       . | tee /dev/stderr |
-      yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr)
+      yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr)
   [ "${actual}" = 'ssl-redirect' ]
 
   local actual=$(helm template \
@@ -77,7 +77,7 @@ load _helpers
       --set 'server.ingress.hosts[0].host=test.com' \
      --set 'server.ingress.hosts[0].paths[0]=/' \
       --set 'server.ingress.extraPaths[0].path=/annotation-service' \
-      --set 'server.ingress.extraPaths[0].backend.serviceName=ssl-redirect' \
+      --set 'server.ingress.extraPaths[0].backend.service.name=ssl-redirect' \
       . | tee /dev/stderr |
       yq -r '.spec.rules[0].http.paths[0].path' | tee /dev/stderr)
   [ "${actual}" = '/annotation-service' ]
@@ -88,7 +88,7 @@ load _helpers
       --set 'server.ingress.hosts[0].host=test.com' \
       --set 'server.ingress.hosts[0].paths[0]=/' \
       --set 'server.ingress.extraPaths[0].path=/annotation-service' \
-      --set 'server.ingress.extraPaths[0].backend.serviceName=ssl-redirect' \
+      --set 'server.ingress.extraPaths[0].backend.service.name=ssl-redirect' \
       . | tee /dev/stderr |
       yq -r '.spec.rules[0].http.paths[1].path' | tee /dev/stderr)
   [ "${actual}" = '/' ]
@@ -131,6 +131,29 @@ load _helpers
   [ "${actual}" = "nginx" ]
 }
 
+@test "server/ingress: ingressClassName added to object spec - string" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ingress.yaml \
+      --set 'server.ingress.enabled=true' \
+      --set server.ingress.ingressClassName=nginx \
+      . | tee /dev/stderr |
+      yq -r '.spec.ingressClassName' | tee /dev/stderr)
+  [ "${actual}" = "nginx" ]
+}
+
+@test "server/ingress: ingressClassName is not added by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ingress.yaml \
+      --set 'server.ingress.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.ingressClassName' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}
+
 @test "server/ingress: uses active service when ha by default - yaml" {
   cd `chart_dir`
@@ -141,7 +164,7 @@ load _helpers
       --set 'server.ha.enabled=true' \
       --set 'server.service.enabled=true' \
       . | tee /dev/stderr |
-      yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr)
+      yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr)
   [ "${actual}" = "RELEASE-NAME-vault-active" ]
 }
@@ -156,7 +179,7 @@ load _helpers
       --set 'server.ha.enabled=true' \
       --set 'server.service.enabled=true' \
       . | tee /dev/stderr |
-      yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr)
+      yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr)
   [ "${actual}" = "RELEASE-NAME-vault" ]
 }
@@ -170,6 +193,21 @@ load _helpers
       --set 'server.ha.enabled=false' \
       --set 'server.service.enabled=true' \
       . | tee /dev/stderr |
+      yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr)
+  [ "${actual}" = "RELEASE-NAME-vault" ]
+}
+
+@test "server/ingress: k8s 1.18.3 uses regular service when not ha - yaml" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ingress.yaml \
+      --set 'server.ingress.enabled=true' \
+      --set 'server.dev.enabled=false' \
+      --set 'server.ha.enabled=false' \
+      --set 'server.service.enabled=true' \
+      --kube-version 1.18.3 \
+      . | tee /dev/stderr |
       yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr)
   [ "${actual}" = "RELEASE-NAME-vault" ]
 }
@@ -185,6 +223,45 @@ load _helpers
       --set 'server.ha.enabled=false' \
       --set 'server.service.enabled=true' \
       . | tee /dev/stderr |
-      yq -r '.spec.rules[0].http.paths[0].backend.serviceName' | tee /dev/stderr)
+      yq -r '.spec.rules[0].http.paths[0].backend.service.name' | tee /dev/stderr)
   [ "${actual}" = "RELEASE-NAME-vault" ]
 }
+
+@test "server/ingress: pathType is added to Kubernetes version == 1.19.0" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ingress.yaml \
+      --set 'server.ingress.enabled=true' \
+      --set server.ingress.pathType=ImplementationSpecific \
+      --kube-version 1.19.0 \
+      . | tee /dev/stderr |
+      yq -r '.spec.rules[0].http.paths[0].pathType' | tee /dev/stderr)
+  [ "${actual}" = "ImplementationSpecific" ]
+}
+
+@test "server/ingress: pathType is not added to Kubernetes versions < 1.19" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ingress.yaml \
+      --set 'server.ingress.enabled=true' \
+      --set server.ingress.pathType=ImplementationSpecific \
+      --kube-version 1.18.3 \
+      . | tee /dev/stderr |
+      yq -r '.spec.rules[0].http.paths[0].pathType' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}
+
+@test "server/ingress: pathType is added to Kubernetes versions > 1.19" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-ingress.yaml \
+      --set 'server.ingress.enabled=true' \
+      --set server.ingress.pathType=Prefix \
+      --kube-version 1.20.0 \
+      . | tee /dev/stderr |
+      yq -r '.spec.rules[0].http.paths[0].pathType' | tee /dev/stderr)
  [ "${actual}" = "Prefix" ]
+}

View file

@@ -384,3 +384,43 @@ load _helpers
       yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr)
   [ "${actual}" = "https" ]
 }
+
+# duplicated in server-ha-active-service.bats
+@test "server/Service: NodePort assert externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=NodePort' \
+      --set 'server.service.externalTrafficPolicy=Foo' \
+      . | tee /dev/stderr |
+      yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "Foo" ]
+}
+
+# duplicated in server-ha-active-service.bats
+@test "server/Service: NodePort assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=NodePort' \
+      --set 'server.service.externalTrafficPolicy=' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}
+
+# duplicated in server-ha-active-service.bats
+@test "server/Service: ClusterIP assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/server-service.yaml \
+      --set 'server.ha.enabled=true' \
+      --set 'server.service.type=ClusterIP' \
+      --set 'server.service.externalTrafficPolicy=Foo' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}

View file

@@ -135,6 +135,16 @@ load _helpers
       . | tee /dev/stderr |
       yq -r '.spec.type' | tee /dev/stderr)
   [ "${actual}" = "LoadBalancer" ]
+
+  local actual=$(helm template \
+      --show-only templates/ui-service.yaml \
+      --set 'server.standalone.enabled=true' \
+      --set 'ui.serviceType=LoadBalancer' \
+      --set 'ui.externalTrafficPolicy=Local' \
+      --set 'ui.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "Local" ]
 }
 
 @test "ui/Service: LoadBalancerIP set if specified and serviceType == LoadBalancer" {
@@ -183,6 +193,19 @@ load _helpers
   [ "${actual}" = "null" ]
 }
 
+@test "ui/Service: ClusterIP assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/ui-service.yaml \
+      --set 'server.standalone.enabled=true' \
+      --set 'ui.serviceType=ClusterIP' \
+      --set 'ui.externalTrafficPolicy=Foo' \
+      --set 'ui.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}
+
 @test "ui/Service: specify annotations" {
   cd `chart_dir`
   local actual=$(helm template \
@@ -323,3 +346,30 @@ load _helpers
       yq -r '.spec.ports[0].nodePort' | tee /dev/stderr)
   [ "${actual}" = "123" ]
 }
+
+@test "ui/Service: LoadBalancer assert externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/ui-service.yaml \
+      --set 'ui.enabled=true' \
+      --set 'server.standalone.enabled=true' \
+      --set 'ui.serviceType=LoadBalancer' \
+      --set 'ui.externalTrafficPolicy=Foo' \
+      . | tee /dev/stderr |
+      yq -r '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "Foo" ]
+}
+
+@test "ui/Service: LoadBalancer assert no externalTrafficPolicy" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      --show-only templates/ui-service.yaml \
+      --set 'ui.enabled=true' \
+      --set 'server.standalone.enabled=true' \
+      --set 'ui.serviceType=LoadBalancer' \
+      --set 'ui.externalTrafficPolicy=' \
+      . | tee /dev/stderr |
+      yq '.spec.externalTrafficPolicy' | tee /dev/stderr)
+  [ "${actual}" = "null" ]
+}

View file

@@ -6,13 +6,13 @@ global:
 injector:
   image:
     repository: "registry.connect.redhat.com/hashicorp/vault-k8s"
-    tag: "0.13.0-ubi"
+    tag: "0.14.1-ubi"
 
   agentImage:
     repository: "registry.connect.redhat.com/hashicorp/vault"
-    tag: "1.8.2-ubi"
+    tag: "1.9.0-ubi"
 
 server:
   image:
     repository: "registry.connect.redhat.com/hashicorp/vault"
-    tag: "1.8.2-ubi"
+    tag: "1.9.0-ubi"

View file

@@ -205,6 +205,9 @@
         "properties": {
           "exitOnRetryFailure": {
             "type": "boolean"
+          },
+          "staticSecretRenderInterval": {
+            "type": "string"
           }
         }
       }
@@ -287,23 +290,6 @@
         "properties": {
          "enabled": {
             "type": "boolean"
-          },
-          "image": {
-            "type": "object",
-            "properties": {
-              "repository": {
-                "type": "string"
-              },
-              "tag": {
-                "type": "string"
-              }
-            }
-          },
-          "ttl": {
-            "type": "string"
-          },
-          "useContainer": {
-            "type": "boolean"
           }
         }
       },
@@ -614,6 +600,9 @@
             }
           }
         },
+        "ingressClassName": {
+          "type": "string"
+        },
         "labels": {
           "type": "object"
         },

View file

@@ -37,16 +37,6 @@ injector:
   # so that only one injector attempts to create TLS certificates.
   leaderElector:
     enabled: true
-    # Note: The deployment of the leader-elector container will soon be removed
-    # from this chart since vault-k8s now uses an internal mechanism to
-    # determine leadership.
-    # To enable the deployment of the leader-elector container for use with
-    # vault-k8s 0.12.0 and earlier, set `useContainer=true`
-    useContainer: false
-    image:
-      repository: "gcr.io/google_containers/leader-elector"
-      tag: "0.4"
-    ttl: 60s
 
   # If true, will enable a node exporter metrics endpoint at /metrics.
   metrics:
@@ -59,7 +49,7 @@ injector:
   # image sets the repo and tag of the vault-k8s image to use for the injector.
   image:
     repository: "hashicorp/vault-k8s"
-    tag: "0.13.0"
+    tag: "0.14.1"
     pullPolicy: IfNotPresent
 
   # agentImage sets the repo and tag of the Vault image to use for the Vault Agent
@@ -67,7 +57,7 @@ injector:
   # required.
   agentImage:
     repository: "hashicorp/vault"
-    tag: "1.8.2"
+    tag: "1.9.0"
 
   # The default values for the injected Vault Agent containers.
   agentDefaults:
@@ -85,6 +75,7 @@ injector:
     # Default values within Agent's template_config stanza.
     templateConfig:
       exitOnRetryFailure: true
+      staticSecretRenderInterval: ""
 
   # Mount Path of the Vault Kubernetes Auth Method.
   authPath: "auth/kubernetes"
@@ -236,7 +227,7 @@ server:
   image:
     repository: "hashicorp/vault"
-    tag: "1.8.2"
+    tag: "1.9.0"
   # Overrides the default Image Pull Policy
   pullPolicy: IfNotPresent
@@ -277,6 +268,14 @@ server:
     #   kubernetes.io/ingress.class: nginx
     #   kubernetes.io/tls-acme: "true"
 
+    # Optionally use ingressClassName instead of deprecated annotation.
+    # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation
+    ingressClassName: ""
+
+    # As of Kubernetes 1.19, all Ingress Paths must have a pathType configured. The default value below should be sufficient in most cases.
+    # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for other possible values.
+    pathType: Prefix
+
     # When HA mode is enabled and K8s service registration is being used,
     # configure the ingress to point to the Vault active service.
     activeService: true
@@ -287,8 +286,10 @@ server:
     extraPaths: []
     # - path: /*
     #   backend:
-    #     serviceName: ssl-redirect
-    #     servicePort: use-annotation
+    #     service:
+    #       name: ssl-redirect
+    #       port:
+    #         number: use-annotation
     tls: []
     # - secretName: chart-example-tls
     #   hosts:
@@ -493,6 +494,12 @@ server:
     # or NodePort.
     #type: ClusterIP
 
+    # The externalTrafficPolicy can be set to either Cluster or Local
+    # and is only valid for LoadBalancer and NodePort service types.
+    # The default value is Cluster.
+    # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy
+    externalTrafficPolicy: Cluster
+
     # If type is set to "NodePort", a specific nodePort value can be configured,
     # will be random if left blank.
     #nodePort: 30000
@@ -714,7 +721,13 @@ ui:
   externalPort: 8200
   targetPort: 8200
 
-  # loadBalancerSourceRanges:
+  # The externalTrafficPolicy can be set to either Cluster or Local
+  # and is only valid for LoadBalancer and NodePort service types.
+  # The default value is Cluster.
+  # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy
+  externalTrafficPolicy: Cluster
+
+  #loadBalancerSourceRanges:
   #   - 10.0.0.0/16
   #   - 1.78.23.3/32
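
Taken together, the options this merge introduces can be exercised in a single dry render; a sketch with illustrative values:

    # Hypothetical overrides touching the new chart options, then a render check.
    helm template . \
      --set 'injector.agentDefaults.templateConfig.staticSecretRenderInterval=1m' \
      --set 'server.ingress.enabled=true' \
      --set 'server.ingress.ingressClassName=nginx' \
      --set 'server.ingress.pathType=Prefix' \
      --set 'server.service.externalTrafficPolicy=Local' \
      --set 'ui.enabled=true' \
      --set 'ui.externalTrafficPolicy=Local' \
      > /dev/null && echo "chart renders cleanly"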