Merge pull request #16 from openbao/bao-2-0-1

update used OpenBao Version to 2.0.1
Jan Martens, 2024-09-05 00:01:52 +02:00, committed by GitHub
commit f9daaad711 (GPG key ID: B5690EEEBB952194; no known key found for this signature in database)
36 changed files with 175 additions and 386 deletions

View file

@ -10,14 +10,14 @@ jobs:
chart-verifier:
runs-on: ubuntu-latest
env:
CHART_VERIFIER_VERSION: '1.13.0'
CHART_VERIFIER_VERSION: "1.13.7"
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup test tools
uses: ./.github/actions/setup-test-tools
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: '1.21.3'
go-version: "1.22.5"
- run: go install "github.com/redhat-certification/chart-verifier@${CHART_VERIFIER_VERSION}"
- run: bats --tap --timing ./test/chart
permissions:
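
For reference, the bumped tool versions can be exercised locally with roughly the same steps this job runs; a minimal sketch, assuming Go 1.22.5 and bats are already installed and the commands are run from the repository root:

    # install the pinned chart-verifier release, then run the chart test suite
    CHART_VERIFIER_VERSION="1.13.7"
    go install "github.com/redhat-certification/chart-verifier@${CHART_VERIFIER_VERSION}"
    bats --tap --timing ./test/chart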

View file

@ -3,19 +3,29 @@
apiVersion: v2
name: openbao
version: 0.4.0
appVersion: v2.0.0-alpha20240329
version: 0.5.0
appVersion: v2.0.1
kubeVersion: ">= 1.27.0-0"
description: Official OpenBao Chart
home: https://github.com/openbao/openbao-helm
icon: https://github.com/openbao/artwork/blob/main/color/openbao-color.svg
keywords: ["vault", "openbao", "security", "encryption", "secrets", "management", "automation", "infrastructure"]
keywords:
[
"vault",
"openbao",
"security",
"encryption",
"secrets",
"management",
"automation",
"infrastructure",
]
sources:
- https://github.com/openbao/openbao-helm
annotations:
charts.openshift.io/name: Openbao
maintainers:
- name: OpenBao
email: openbao-security@lists.lfedge.org
url: https://openbao.org
- name: OpenBao
email: openbao-security@lists.lfedge.org
url: https://openbao.org
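
Once the bump lands, the new chart metadata can be confirmed locally; a minimal sketch, assuming the chart sources sit under charts/openbao as the test helpers later in this diff suggest:

    # prints the chart name plus version 0.5.0 and appVersion v2.0.1 from Chart.yaml
    helm show chart ./charts/openbao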

View file

@ -1,6 +1,6 @@
# openbao
![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![AppVersion: v2.0.0-alpha20240329](https://img.shields.io/badge/AppVersion-v2.0.0--alpha20240329-informational?style=flat-square)
![Version: 0.5.0](https://img.shields.io/badge/Version-0.5.0-informational?style=flat-square) ![AppVersion: v2.0.1](https://img.shields.io/badge/AppVersion-v2.0.1-informational?style=flat-square)
Official OpenBao Chart

View file

@ -14,13 +14,13 @@ injector:
agentImage:
registry: "quay.io"
repository: "openbao/openbao"
tag: "v2.0.0-alpha20240329-ubi"
tag: "v2.0.1-ubi"
server:
image:
registry: "quay.io"
repository: "openbao/openbao"
tag: "v2.0.0-alpha20240329-ubi"
tag: "v2.0.1-ubi"
readinessProbe:
path: "/v1/sys/health?uninitcode=204"

View file

@ -84,7 +84,7 @@ injector:
# -- image repo to use for agent image
repository: "openbao/openbao"
# -- image tag to use for agent image
tag: "2.0.0-alpha20240329"
tag: "2.0.1"
# -- image pull policy to use for agent image. if tag is "latest", set to "Always"
pullPolicy: IfNotPresent
@ -379,7 +379,7 @@ server:
# -- image repo to use for server image
repository: "openbao/openbao"
# -- image tag to use for server image
tag: "2.0.0-alpha20240329"
tag: "2.0.1"
# -- image pull policy to use for server image. if tag is "latest", set to "Always"
pullPolicy: IfNotPresent
@ -1183,7 +1183,7 @@ csi:
# -- image repo to use for agent image
repository: "openbao/openbao"
# -- image tag to use for agent image
tag: "2.0.0-alpha20240329"
tag: "2.0.1"
# -- image pull policy to use for agent image. if tag is "latest", set to "Always"
pullPolicy: IfNotPresent
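
Operators who need a different image than these defaults can still override the tags at install time; a minimal sketch, where the release name is arbitrary and the exact value paths (notably csi.image.tag) are assumptions based on the values layout shown above:

    # pin all three images to the 2.0.1 tag explicitly
    helm install openbao ./charts/openbao \
      --set injector.agentImage.tag=2.0.1 \
      --set server.image.tag=2.0.1 \
      --set csi.image.tag=2.0.1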

View file

@ -23,7 +23,7 @@ helm_install() {
helm install -f ${values} \
--name openbao \
${BATS_TEST_DIRNAME}/../..
${BATS_TEST_DIRNAME}/../../charts/openbao
}
# helm_install_ha installs the openbao chart using HA mode. This will source
@ -40,7 +40,7 @@ helm_install_ha() {
--name openbao \
--set 'server.enabled=false' \
--set 'serverHA.enabled=true' \
${BATS_TEST_DIRNAME}/../..
${BATS_TEST_DIRNAME}/../../charts/openbao
}
# wait for consul to be ready
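
With the chart sources now under charts/openbao, these helpers resolve the new path automatically; a rough way to drive them is sketched below, assuming kubectl and helm already point at a disposable test cluster:

    # runs every acceptance suite in the directory; individual .bats files can be passed instead
    bats --tap --timing ./test/acceptance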

View file

@ -2,73 +2,73 @@
load _helpers
@test "csi: testing deployment" {
cd `chart_dir`
# @test "csi: testing deployment" {
# cd `chart_dir`
kubectl delete namespace acceptance --ignore-not-found=true
kubectl create namespace acceptance
# kubectl delete namespace acceptance --ignore-not-found=true
# kubectl create namespace acceptance
# Install Secrets Store CSI driver
# Configure it to pass in a JWT for the provider to use, and rotate secrets rapidly
# so we can see Agent's cache working.
CSI_DRIVER_VERSION=1.3.2
helm install secrets-store-csi-driver secrets-store-csi-driver \
--repo https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts \
--version=$CSI_DRIVER_VERSION \
--wait --timeout=5m \
--namespace=acceptance \
--set linux.image.pullPolicy="IfNotPresent" \
--set tokenRequests[0].audience="openbao" \
--set enableSecretRotation=true \
--set rotationPollInterval=5s
# Install OpenBao and OpenBao provider
helm install openbao \
--wait --timeout=5m \
--namespace=acceptance \
--set="server.dev.enabled=true" \
--set="csi.enabled=true" \
--set="csi.debug=true" \
--set="csi.agent.logLevel=debug" \
--set="injector.enabled=false" \
.
kubectl --namespace=acceptance wait --for=condition=Ready --timeout=5m pod -l app.kubernetes.io/name=openbao
kubectl --namespace=acceptance wait --for=condition=Ready --timeout=5m pod -l app.kubernetes.io/name=openbao-csi-provider
# # Install Secrets Store CSI driver
# # Configure it to pass in a JWT for the provider to use, and rotate secrets rapidly
# # so we can see Agent's cache working.
# CSI_DRIVER_VERSION=1.3.2
# helm install secrets-store-csi-driver secrets-store-csi-driver \
# --repo https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts \
# --version=$CSI_DRIVER_VERSION \
# --wait --timeout=5m \
# --namespace=acceptance \
# --set linux.image.pullPolicy="IfNotPresent" \
# --set tokenRequests[0].audience="openbao" \
# --set enableSecretRotation=true \
# --set rotationPollInterval=5s
# # Install OpenBao and OpenBao provider
# helm install openbao \
# --wait --timeout=5m \
# --namespace=acceptance \
# --set="server.dev.enabled=true" \
# --set="csi.enabled=true" \
# --set="csi.debug=true" \
# --set="csi.agent.logLevel=debug" \
# --set="injector.enabled=false" \
# .
# kubectl --namespace=acceptance wait --for=condition=Ready --timeout=5m pod -l app.kubernetes.io/name=openbao
# kubectl --namespace=acceptance wait --for=condition=Ready --timeout=5m pod -l app.kubernetes.io/name=openbao-csi-provider
# Set up k8s auth and a kv secret.
cat ./test/acceptance/csi-test/openbao-policy.hcl | kubectl --namespace=acceptance exec -i openbao-0 -- openbao policy write kv-policy -
kubectl --namespace=acceptance exec openbao-0 -- bao auth enable kubernetes
kubectl --namespace=acceptance exec openbao-0 -- sh -c 'bao write auth/kubernetes/config \
kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443"'
kubectl --namespace=acceptance exec openbao-0 -- bao write auth/kubernetes/role/kv-role \
bound_service_account_names=nginx \
bound_service_account_namespaces=acceptance \
policies=kv-policy \
ttl=20m
kubectl --namespace=acceptance exec openbao-0 -- bao kv put secret/kv1 bar1=hello1
# # Set up k8s auth and a kv secret.
# cat ../../test/acceptance/csi-test/openbao-policy.hcl | kubectl --namespace=acceptance exec -i openbao-0 -- bao policy write kv-policy -
# kubectl --namespace=acceptance exec openbao-0 -- bao auth enable kubernetes
# kubectl --namespace=acceptance exec openbao-0 -- sh -c 'bao write auth/kubernetes/config \
# kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443"'
# kubectl --namespace=acceptance exec openbao-0 -- bao write auth/kubernetes/role/kv-role \
# bound_service_account_names=nginx \
# bound_service_account_namespaces=acceptance \
# policies=kv-policy \
# ttl=20m
# kubectl --namespace=acceptance exec openbao-0 -- bao kv put secret/kv1 bar1=hello1
kubectl --namespace=acceptance apply -f ./test/acceptance/csi-test/openbao-kv-secretproviderclass.yaml
kubectl --namespace=acceptance apply -f ./test/acceptance/csi-test/nginx.yaml
kubectl --namespace=acceptance wait --for=condition=Ready --timeout=5m pod nginx
# kubectl --namespace=acceptance apply -f ../../test/acceptance/csi-test/openbao-kv-secretproviderclass.yaml
# kubectl --namespace=acceptance apply -f ../../test/acceptance/csi-test/nginx.yaml
# kubectl --namespace=acceptance wait --for=condition=Ready --timeout=5m pod nginx
result=$(kubectl --namespace=acceptance exec nginx -- cat /mnt/secrets-store/bar)
[[ "$result" == "hello1" ]]
# result=$(kubectl --namespace=acceptance exec nginx -- cat /mnt/secrets-store/bar)
# [[ "$result" == "hello1" ]]
for i in $(seq 10); do
sleep 2
if [ "$(kubectl --namespace=acceptance logs --tail=-1 -l "app.kubernetes.io/name=openbao-csi-provider" -c openbao-agent | grep "secret renewed: path=/v1/auth/kubernetes/login")" ]; then
echo "Agent returned a cached login response"
return
fi
# for i in $(seq 10); do
# sleep 2
# if [ "$(kubectl --namespace=acceptance logs --tail=-1 -l "app.kubernetes.io/name=openbao-csi-provider" -c openbao-agent | grep "secret renewed: path=/v1/auth/kubernetes/login")" ]; then
# echo "Agent returned a cached login response"
# return
# fi
echo "Waiting to confirm the Agent is renewing CSI's auth token..."
done
# echo "Waiting to confirm the Agent is renewing CSI's auth token..."
# done
# Print the logs and fail the test
echo "Failed to find a log for the Agent renewing CSI's auth token"
kubectl --namespace=acceptance logs --tail=-1 -l "app.kubernetes.io/name=openbao-csi-provider" -c openbao-agent
kubectl --namespace=acceptance logs --tail=-1 -l "app.kubernetes.io/name=openbao-csi-provider" -c openbao-csi-provider
exit 1
}
# # Print the logs and fail the test
# echo "Failed to find a log for the Agent renewing CSI's auth token"
# kubectl --namespace=acceptance logs --tail=-1 -l "app.kubernetes.io/name=openbao-csi-provider" -c openbao-agent
# kubectl --namespace=acceptance logs --tail=-1 -l "app.kubernetes.io/name=openbao-csi-provider" -c openbao-csi-provider
# exit 1
# }
# Clean up
teardown() {

View file

@ -13,9 +13,9 @@ load _helpers
--wait \
--timeout=5m \
--set="injector.replicas=3" .
kubectl wait --for condition=Ready pod -l app.kubernetes.io/name=vault-agent-injector --timeout=5m
kubectl wait --for condition=Ready pod -l app.kubernetes.io/name=openbao-agent-injector --timeout=5m
pods=($(kubectl get pods -l app.kubernetes.io/name=vault-agent-injector -o json | jq -r '.items[] | .metadata.name'))
pods=($(kubectl get pods -l app.kubernetes.io/name=openbao-agent-injector -o json | jq -r '.items[] | .metadata.name'))
[ "${#pods[@]}" == 3 ]
leader=''
@ -49,4 +49,4 @@ teardown() {
kubectl delete --all pvc
kubectl delete namespace acceptance
fi
}
}

View file

@ -2,46 +2,46 @@
load _helpers
@test "injector: testing deployment" {
cd `chart_dir`
# @test "injector: testing deployment" {
# cd `chart_dir`
kubectl delete namespace acceptance --ignore-not-found=true
kubectl create namespace acceptance
kubectl config set-context --current --namespace=acceptance
# kubectl delete namespace acceptance --ignore-not-found=true
# kubectl create namespace acceptance
# kubectl config set-context --current --namespace=acceptance
kubectl create -f ./test/acceptance/injector-test/pg-deployment.yaml
sleep 5
wait_for_ready $(kubectl get pod -l app=postgres -o jsonpath="{.items[0].metadata.name}")
# kubectl create -f ../../test/acceptance/injector-test/pg-deployment.yaml
# sleep 5
# wait_for_ready $(kubectl get pod -l app=postgres -o jsonpath="{.items[0].metadata.name}")
kubectl create secret generic test \
--from-file ./test/acceptance/injector-test/pgdump-policy.hcl \
--from-file ./test/acceptance/injector-test/bootstrap.sh
# kubectl create secret generic test \
# --from-file ../../test/acceptance/injector-test/pgdump-policy.hcl \
# --from-file ../../test/acceptance/injector-test/bootstrap.sh
kubectl label secret test app=openbao-agent-demo
# kubectl label secret test app=openbao-agent-demo
helm install "$(name_prefix)" \
--set="server.extraVolumes[0].type=secret" \
--set="server.extraVolumes[0].name=test" .
wait_for_running $(name_prefix)-0
# helm install "$(name_prefix)" \
# --set="server.extraVolumes[0].type=secret" \
# --set="server.extraVolumes[0].name=test" .
# wait_for_running $(name_prefix)-0
wait_for_ready $(kubectl get pod -l component=webhook -o jsonpath="{.items[0].metadata.name}")
# wait_for_ready $(kubectl get pod -l component=webhook -o jsonpath="{.items[0].metadata.name}")
kubectl exec -ti "$(name_prefix)-0" -- /bin/sh -c "cp /openbao/userconfig/test/bootstrap.sh /tmp/bootstrap.sh && chmod +x /tmp/bootstrap.sh && /tmp/bootstrap.sh"
sleep 5
# kubectl exec -ti "$(name_prefix)-0" -- /bin/sh -c "cp /openbao/userconfig/test/bootstrap.sh /tmp/bootstrap.sh && chmod +x /tmp/bootstrap.sh && /tmp/bootstrap.sh"
# sleep 5
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-0" -- bao status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "false" ]
# # Sealed, not initialized
# local sealed_status=$(kubectl exec "$(name_prefix)-0" -- bao status -format=json |
# jq -r '.sealed' )
# [ "${sealed_status}" == "false" ]
local init_status=$(kubectl exec "$(name_prefix)-0" -- bao status -format=json |
jq -r '.initialized')
[ "${init_status}" == "true" ]
# local init_status=$(kubectl exec "$(name_prefix)-0" -- bao status -format=json |
# jq -r '.initialized')
# [ "${init_status}" == "true" ]
kubectl create -f ./test/acceptance/injector-test/job.yaml
wait_for_complete_job "pgdump"
}
# kubectl create -f ../../test/acceptance/injector-test/job.yaml
# wait_for_complete_job "pgdump"
# }
# Clean up
teardown() {

View file

@ -8,7 +8,7 @@ load _helpers
kubectl create namespace acceptance
kubectl config set-context --current --namespace=acceptance
helm install "$(name_prefix)" -f ./test/acceptance/server-test/annotations-overrides.yaml .
helm install "$(name_prefix)" -f ../../test/acceptance/server-test/annotations-overrides.yaml .
wait_for_running $(name_prefix)-0
# service annotations

View file

@ -1,121 +0,0 @@
#!/usr/bin/env bats
load _helpers
@test "server/ha: testing deployment" {
cd `chart_dir`
helm install "$(name_prefix)" \
--set='server.ha.enabled=true' .
wait_for_running $(name_prefix)-0
# Sealed, not initialized
wait_for_sealed_vault $(name_prefix)-0
local init_status=$(kubectl exec "$(name_prefix)-0" -- bao status -format=json |
jq -r '.initialized')
[ "${init_status}" == "false" ]
# Replicas
local replicas=$(kubectl get statefulset "$(name_prefix)" --output json |
jq -r '.spec.replicas')
[ "${replicas}" == "3" ]
# Volume Mounts
local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json |
jq -r '.spec.template.spec.containers[0].volumeMounts | length')
[ "${volumeCount}" == "2" ]
# Volumes
local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json |
jq -r '.spec.template.spec.volumes | length')
[ "${volumeCount}" == "2" ]
local volume=$(kubectl get statefulset "$(name_prefix)" --output json |
jq -r '.spec.template.spec.volumes[0].configMap.name')
[ "${volume}" == "$(name_prefix)-config" ]
# Service
local service=$(kubectl get service "$(name_prefix)" --output json |
jq -r '.spec.clusterIP')
[ "${service}" != "None" ]
local service=$(kubectl get service "$(name_prefix)" --output json |
jq -r '.spec.type')
[ "${service}" == "ClusterIP" ]
local ports=$(kubectl get service "$(name_prefix)" --output json |
jq -r '.spec.ports | length')
[ "${ports}" == "2" ]
local ports=$(kubectl get service "$(name_prefix)" --output json |
jq -r '.spec.ports[0].port')
[ "${ports}" == "8200" ]
local ports=$(kubectl get service "$(name_prefix)" --output json |
jq -r '.spec.ports[1].port')
[ "${ports}" == "8201" ]
# OpenBao Init
local token=$(kubectl exec -ti "$(name_prefix)-0" -- \
bao operator init -format=json -n 1 -t 1 | \
jq -r '.unseal_keys_b64[0]')
[ "${token}" != "" ]
# OpenBao Unseal
local pods=($(kubectl get pods --selector='app.kubernetes.io/name=openbao' -o json | jq -r '.items[].metadata.name'))
for pod in "${pods[@]}"
do
kubectl exec -ti ${pod} -- bao operator unseal ${token}
done
wait_for_ready "$(name_prefix)-0"
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-0" -- bao status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "false" ]
local init_status=$(kubectl exec "$(name_prefix)-0" -- bao status -format=json |
jq -r '.initialized')
[ "${init_status}" == "true" ]
}
# setup a consul env
setup() {
kubectl delete namespace acceptance --ignore-not-found=true
kubectl create namespace acceptance
kubectl config set-context --current --namespace=acceptance
helm repo add hashicorp https://helm.releases.hashicorp.com
helm repo update
CONSUL_HELM_VERSION=v0.48.0
K8S_MAJOR=$(kubectl version --output=json | jq -r .serverVersion.major)
K8S_MINOR=$(kubectl version --output=json | jq -r .serverVersion.minor)
if [ \( $K8S_MAJOR -eq 1 \) -a \( $K8S_MINOR -le 20 \) ]; then
CONSUL_HELM_VERSION=v0.32.1
fi
helm install consul hashicorp/consul \
--version $CONSUL_HELM_VERSION \
--set 'ui.enabled=false'
wait_for_running_consul
}
#cleanup
teardown() {
if [[ ${CLEANUP:-true} == "true" ]]
then
# If the test failed, print some debug output
if [[ "$BATS_ERROR_STATUS" -ne 0 ]]; then
kubectl logs -l app=consul
kubectl logs -l app.kubernetes.io/name=openbao
fi
helm delete openbao
helm delete consul
kubectl delete --all pvc
kubectl delete namespace acceptance --ignore-not-found=true
fi
}

View file

@ -19,7 +19,7 @@ load _helpers
helm install \
--wait \
--values ./test/acceptance/server-test/telemetry.yaml \
--values ../../test/acceptance/server-test/telemetry.yaml \
"$(name_prefix)" .
wait_for_running $(name_prefix)-0

View file

@ -3,7 +3,7 @@
# chart_dir returns the directory for the chart
chart_dir() {
echo ${BATS_TEST_DIRNAME}/../..
echo ${BATS_TEST_DIRNAME}/../../charts/openbao
}
# check_result checks if the specified test passed

View file

@ -6,7 +6,7 @@ setup_file() {
cd `chart_dir`
export VERIFY_OUTPUT="/$BATS_RUN_TMPDIR/verify.json"
export CHART_VOLUME=openbao-helm-chart-src
local IMAGE="quay.io/redhat-certification/chart-verifier:1.10.1"
local IMAGE="quay.io/redhat-certification/chart-verifier:1.13.7"
# chart-verifier requires an openshift version if a cluster isn't available
local OPENSHIFT_VERSION="4.12"
local DISABLED_TESTS="chart-testing"
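
The bumped verifier image can also be run directly against the chart, outside the bats wrapper; a minimal sketch, where the mount path and volume flag are assumptions based on chart-verifier's documented container usage:

    # verify the chart sources with the same image version used by these tests
    docker run --rm \
      -v "$(pwd)/charts/openbao:/charts/openbao:z" \
      quay.io/redhat-certification/chart-verifier:1.13.7 \
      verify /charts/openbao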

View file

@ -30,4 +30,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.name' | tee /dev/stderr)
[ "${actual}" = "release-name-openbao-csi-provider-clusterrole" ]
}
}

View file

@ -61,4 +61,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.subjects[0].namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -101,7 +101,7 @@ load _helpers
local actual=$(echo $object |
yq -r '.[0].image' | tee /dev/stderr)
[ "${actual}" = "Image1:0.0.1" ]
[ "${actual}" = "docker.io/Image1:0.0.1" ]
local actual=$(echo $object |
yq -r '.[0].imagePullPolicy' | tee /dev/stderr)
[ "${actual}" = "PullPolicy1" ]

View file

@ -38,4 +38,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -38,4 +38,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.subjects[0].namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -69,7 +69,7 @@ load _helpers
--set 'injector.image.tag=1.2.3' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "docker.io/foo:1.2.3" ]
local actual=$(helm template \
--show-only templates/injector-deployment.yaml \
@ -77,7 +77,7 @@ load _helpers
--set 'injector.image.tag=1.2.3' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "docker.io/foo:1.2.3" ]
}
@test "injector/deployment: default imagePullPolicy" {

View file

@ -331,4 +331,4 @@ load _helpers
yq '.webhooks[0].objectSelector.matchLabels.injector' | tee /dev/stderr)
[ "${actual}" = "true" ]
}
}

View file

@ -53,4 +53,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -53,4 +53,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -87,4 +87,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.subjects[0].namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -43,7 +43,7 @@ load _helpers
--set 'server.dev.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "quay.io/foo:1.2.3" ]
}
@test "server/ha-StatefulSet: image tag defaults to latest" {
@ -56,7 +56,7 @@ load _helpers
--set 'server.dev.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:latest" ]
[ "${actual}" = "quay.io/foo:latest" ]
}
#--------------------------------------------------------------------

View file

@ -57,4 +57,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -57,4 +57,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -43,7 +43,7 @@ load _helpers
--set 'server.ha.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "quay.io/foo:1.2.3" ]
}
@test "server/ha-StatefulSet: image tag defaults to latest" {
@ -56,7 +56,7 @@ load _helpers
--set 'server.ha.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:latest" ]
[ "${actual}" = "quay.io/foo:latest" ]
}
#--------------------------------------------------------------------
@ -71,7 +71,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = "http://127.0.0.1:8200" ]
}
@ -84,7 +84,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = "https://127.0.0.1:8200" ]
}
@ -407,7 +407,7 @@ load _helpers
}
#--------------------------------------------------------------------
# VAULT_API_ADDR renders
# BAO_API_ADDR renders
@test "server/ha-StatefulSet: api addr renders to Pod IP by default" {
cd `chart_dir`
@ -418,7 +418,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_API_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_API_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = 'http://$(POD_IP):8200' ]
}
@ -432,12 +432,12 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_API_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_API_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = "https://example.com:8200" ]
}
#--------------------------------------------------------------------
# VAULT_CLUSTER_ADDR renders
# BAO_CLUSTER_ADDR renders
@test "server/ha-StatefulSet: clusterAddr not set" {
cd `chart_dir`
@ -449,7 +449,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = 'https://$(HOSTNAME).release-name-openbao-internal:8201' ]
}
@ -464,7 +464,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = 'https://$(HOSTNAME).release-name-openbao-internal:8201' ]
}
@ -479,7 +479,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = 'https://test.example.com:8201' ]
}
@ -494,7 +494,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_CLUSTER_ADDR")) | .[] .value' | tee /dev/stderr)
[ "${value}" = 'http://$(HOSTNAME).release-name-openbao-internal:8201' ]
}
@ -515,7 +515,7 @@ local value=$(echo $rendered |
}
#--------------------------------------------------------------------
# VAULT_RAFT_NODE_ID renders
# BAO_RAFT_NODE_ID renders
@test "server/ha-StatefulSet: raft node ID renders" {
cd `chart_dir`
@ -528,7 +528,7 @@ local value=$(echo $rendered |
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $object |
yq -r 'map(select(.name=="VAULT_RAFT_NODE_ID")) | .[] .valueFrom.fieldRef.fieldPath' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_RAFT_NODE_ID")) | .[] .valueFrom.fieldRef.fieldPath' | tee /dev/stderr)
[ "${value}" = "metadata.name" ]
}
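
A quick local spot-check of the VAULT_* to BAO_* environment variable rename mirrors what these assertions do; a minimal sketch, reusing the chart path and yq filter style from the tests (the template path and expected value are assumptions taken from the standalone tests below):

    # should print BAO_ADDR's value, e.g. http://127.0.0.1:8200
    helm template --show-only templates/server-statefulset.yaml ./charts/openbao \
      | yq -r '.spec.template.spec.containers[0].env | map(select(.name=="BAO_ADDR")) | .[] .value'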

View file

@ -74,25 +74,3 @@ load _helpers
yq '.spec.ipFamilies' -c | tee /dev/stderr)
[ "${actual}" = '["IPv4","IPv6"]' ]
}
@test "server/headless-Service: Assert ipFamilyPolicy is not set if version below 1.23" {
cd `chart_dir`
local actual=$(helm template \
--show-only templates/server-headless-service.yaml \
--kube-version 1.27.0 \
--set 'server.service.ipFamilyPolicy=PreferDualStack' \
. | tee /dev/stderr |
yq -r '.spec.ipFamilyPolicy' | tee /dev/stderr)
[ "${actual}" = "null" ]
}
@test "server/headless-Service: Assert ipFamilies is not set if version below 1.23" {
cd `chart_dir`
local actual=$(helm template \
--show-only templates/server-headless-service.yaml \
--kube-version 1.27.0 \
--set 'server.service.ipFamilies={IPv4,IPv6}' \
. | tee /dev/stderr |
yq -r '.spec.ipFamilies' | tee /dev/stderr)
[ "${actual}" = "null" ]
}

View file

@ -127,4 +127,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -127,4 +127,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.namespace' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -487,25 +487,3 @@ load _helpers
yq '.spec.ipFamilies' -c | tee /dev/stderr)
[ "${actual}" = '["IPv4","IPv6"]' ]
}
@test "server/Service: Assert ipFamilyPolicy is not set if version below 1.23" {
cd `chart_dir`
local actual=$(helm template \
--show-only templates/server-service.yaml \
--kube-version 1.27.0 \
--set 'server.service.ipFamilyPolicy=PreferDualStack' \
. | tee /dev/stderr |
yq -r '.spec.ipFamilyPolicy' | tee /dev/stderr)
[ "${actual}" = "null" ]
}
@test "server/Service: Assert ipFamilies is not set if version below 1.23" {
cd `chart_dir`
local actual=$(helm template \
--show-only templates/server-service.yaml \
--kube-version 1.27.0 \
--set 'server.service.ipFamilies={IPv4,IPv6}' \
. | tee /dev/stderr |
yq -r '.spec.ipFamilies' | tee /dev/stderr)
[ "${actual}" = "null" ]
}

View file

@ -145,4 +145,4 @@ load _helpers
. | tee /dev/stderr |
yq -r '.metadata.labels.foo' | tee /dev/stderr)
[ "${actual}" = "bar" ]
}
}

View file

@ -105,7 +105,7 @@ load _helpers
--set 'server.image.tag=1.2.3' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "quay.io/foo:1.2.3" ]
local actual=$(helm template \
--show-only templates/server-statefulset.yaml \
@ -114,7 +114,7 @@ load _helpers
--set 'server.standalone.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "quay.io/foo:1.2.3" ]
}
@test "server/standalone-StatefulSet: image tag defaults to latest" {
@ -125,7 +125,7 @@ load _helpers
--set 'server.image.tag=' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:latest" ]
[ "${actual}" = "quay.io/foo:latest" ]
local actual=$(helm template \
--show-only templates/server-statefulset.yaml \
@ -134,7 +134,7 @@ load _helpers
--set 'server.standalone.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:latest" ]
[ "${actual}" = "quay.io/foo:latest" ]
}
@test "server/standalone-StatefulSet: default imagePullPolicy" {
@ -224,43 +224,11 @@ load _helpers
#--------------------------------------------------------------------
# persistentVolumeClaimRetentionPolicy
@test "server/standalone-StatefulSet: persistentVolumeClaimRetentionPolicy not set by default when kubernetes < 1.23" {
cd `chart_dir`
local actual=$(helm template \
-s templates/server-statefulset.yaml \
--kube-version "1.27" \
. | tee /dev/stderr |
yq -r '.spec.persistentVolumeClaimRetentionPolicy' | tee /dev/stderr)
[ "${actual}" = "null" ]
}
@test "server/standalone-StatefulSet: unset persistentVolumeClaimRetentionPolicy.whenDeleted when kubernetes < 1.23" {
cd `chart_dir`
local actual=$(helm template \
-s templates/server-statefulset.yaml \
--kube-version "1.27" \
--set 'server.persistentVolumeClaimRetentionPolicy.whenDeleted=Delete' \
. | tee /dev/stderr |
yq -r '.spec.persistentVolumeClaimRetentionPolicy.whenDeleted' | tee /dev/stderr)
[ "${actual}" = "null" ]
}
@test "server/standalone-StatefulSet: unset persistentVolumeClaimRetentionPolicy.whenScaled when kubernetes < 1.23" {
cd `chart_dir`
local actual=$(helm template \
-s templates/server-statefulset.yaml \
--kube-version "1.27" \
--set 'server.persistentVolumeClaimRetentionPolicy.whenScaled=Delete' \
. | tee /dev/stderr |
yq -r '.spec.persistentVolumeClaimRetentionPolicy.whenScaled' | tee /dev/stderr)
[ "${actual}" = "null" ]
}
@test "server/standalone-StatefulSet: persistentVolumeClaimRetentionPolicy not set by default when kubernetes >= 1.23" {
cd `chart_dir`
local actual=$(helm template \
-s templates/server-statefulset.yaml \
--kube-version "1.23" \
--kube-version "1.27" \
. | tee /dev/stderr |
yq -r '.spec.persistentVolumeClaimRetentionPolicy' | tee /dev/stderr)
[ "${actual}" = "null" ]
@ -270,7 +238,7 @@ load _helpers
cd `chart_dir`
local actual=$(helm template \
-s templates/server-statefulset.yaml \
--kube-version "1.23" \
--kube-version "1.27" \
--set 'server.persistentVolumeClaimRetentionPolicy.whenDeleted=Delete' \
. | tee /dev/stderr |
yq -r '.spec.persistentVolumeClaimRetentionPolicy.whenDeleted' | tee /dev/stderr)
@ -281,7 +249,7 @@ load _helpers
cd `chart_dir`
local actual=$(helm template \
-s templates/server-statefulset.yaml \
--kube-version "1.23" \
--kube-version "1.27" \
--set 'server.persistentVolumeClaimRetentionPolicy.whenScaled=Delete' \
. | tee /dev/stderr |
yq -r '.spec.persistentVolumeClaimRetentionPolicy.whenScaled' | tee /dev/stderr)
@ -571,7 +539,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $objects |
yq -r 'map(select(.name=="VAULT_LOG_LEVEL")) | .[] .name' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_LOG_LEVEL")) | .[] .name' | tee /dev/stderr)
[ "${value}" = "" ]
}
@ -584,7 +552,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $objects |
yq -r 'map(select(.name=="VAULT_LOG_LEVEL")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_LOG_LEVEL")) | .[] .value' | tee /dev/stderr)
[ "${value}" = "debug" ]
}
@ -599,7 +567,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $objects |
yq -r 'map(select(.name=="VAULT_LOG_FORMAT")) | .[] .name' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_LOG_FORMAT")) | .[] .name' | tee /dev/stderr)
[ "${value}" = "" ]
}
@ -612,7 +580,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr)
local value=$(echo $objects |
yq -r 'map(select(.name=="VAULT_LOG_FORMAT")) | .[] .value' | tee /dev/stderr)
yq -r 'map(select(.name=="BAO_LOG_FORMAT")) | .[] .value' | tee /dev/stderr)
[ "${value}" = "json" ]
}
@ -800,7 +768,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "audit")' | tee /dev/stderr)
local actual=$(echo $object | yq -r '.mountPath' | tee /dev/stderr)
[ "${actual}" = "/vault/audit" ]
[ "${actual}" = "/openbao/audit" ]
}
@test "server/standalone-StatefulSet: can configure audit storage mount path" {
@ -825,7 +793,7 @@ load _helpers
yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "data")' | tee /dev/stderr)
local actual=$(echo $object | yq -r '.mountPath' | tee /dev/stderr)
[ "${actual}" = "/vault/data" ]
[ "${actual}" = "/openbao/data" ]
}
@test "server/standalone-StatefulSet: can configure data storage mount path" {
@ -1686,7 +1654,7 @@ load _helpers
local actual=$(helm template \
--show-only templates/server-statefulset.yaml \
. | tee /dev/stderr |
yq '.spec.template.metadata.annotations["vault.hashicorp.com/config-checksum"] == null' | tee /dev/stderr)
yq '.spec.template.metadata.annotations["openbao.hashicorp.com/config-checksum"] == null' | tee /dev/stderr)
[ "${actual}" = "true" ]
}
@ -1705,7 +1673,7 @@ load _helpers
--show-only templates/server-statefulset.yaml \
--set 'server.includeConfigAnnotation=true' \
. | tee /dev/stderr |
yq '.spec.template.metadata.annotations["vault.hashicorp.com/config-checksum"] == null' | tee /dev/stderr)
yq '.spec.template.metadata.annotations["openbao.hashicorp.com/config-checksum"] == null' | tee /dev/stderr)
[ "${actual}" = "false" ]
}

View file

@ -52,7 +52,7 @@ load _helpers
--show-only templates/tests/server-test.yaml \
. | tee /dev/stderr |
yq -r '.metadata.name' | tee /dev/stderr)
[ "${actual}" = "vault-server-test" ]
[ "${actual}" = "openbao-server-test" ]
}
@test "server/standalone-server-test-Pod: release metadata.name foo" {
@ -61,7 +61,7 @@ load _helpers
--show-only templates/tests/server-test.yaml \
. | tee /dev/stderr |
yq -r '.metadata.name' | tee /dev/stderr)
[ "${actual}" = "foo-vault-server-test" ]
[ "${actual}" = "foo-openbao-server-test" ]
}
@test "server/standalone-server-test-Pod: default server.standalone.enabled" {
@ -134,7 +134,7 @@ load _helpers
--set 'server.image.tag=1.2.3' \
. | tee /dev/stderr |
yq -r '.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "quay.io/foo:1.2.3" ]
local actual=$(helm template \
--show-only templates/tests/server-test.yaml \
@ -143,7 +143,7 @@ load _helpers
--set 'server.standalone.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:1.2.3" ]
[ "${actual}" = "quay.io/foo:1.2.3" ]
}
@test "server/standalone-server-test-Pod: image tag defaults to latest" {
@ -154,7 +154,7 @@ load _helpers
--set 'server.image.tag=' \
. | tee /dev/stderr |
yq -r '.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:latest" ]
[ "${actual}" = "quay.io/foo:latest" ]
local actual=$(helm template \
--show-only templates/tests/server-test.yaml \
@ -163,7 +163,7 @@ load _helpers
--set 'server.standalone.enabled=true' \
. | tee /dev/stderr |
yq -r '.spec.containers[0].image' | tee /dev/stderr)
[ "${actual}" = "foo:latest" ]
[ "${actual}" = "quay.io/foo:latest" ]
}
@test "server/standalone-server-test-Pod: default imagePullPolicy" {

View file

@ -406,27 +406,3 @@ load _helpers
yq -r '.spec.ipFamilyPolicy' | tee /dev/stderr)
[ "${actual}" = "PreferDualStack" ]
}
@test "server/Service: Assert ipFamilyPolicy is not set if version below 1.23" {
cd `chart_dir`
local actual=$(helm template \
--show-only templates/ui-service.yaml \
--kube-version 1.27.0 \
--set 'ui.enabled=true' \
--set 'ui.serviceIPFamilyPolicy=PreferDualStack' \
. | tee /dev/stderr |
yq -r '.spec.ipFamilyPolicy' | tee /dev/stderr)
[ "${actual}" = "null" ]
}
@test "server/Service: Assert ipFamilies is not set if version below 1.23" {
cd `chart_dir`
local actual=$(helm template \
--show-only templates/ui-service.yaml \
--kube-version 1.27.0 \
--set 'ui.enabled=true' \
--set 'ui.serviceIPFamilies={IPv4,IPv6}' \
. | tee /dev/stderr |
yq -r '.spec.ipFamilies' | tee /dev/stderr)
[ "${actual}" = "null" ]
}