Run CI tests in github workflows (#657)

Ports the bats unit, chart-verifier, and bats acceptance tests to GitHub
workflows and actions. The acceptance tests run on kind clusters across
multiple K8s versions, on pushes to the main branch.

Adds a SKIP_CSI env var check in the CSI acceptance test, which the
workflow sets when the K8s version is less than 1.16.

Adds kubeadmConfigPatches to the kind config to allow testing the CSI
provider on K8s versions prior to 1.21.

Updates the Secrets Store CSI driver to 1.0.0 in tests.

Makes the HA Vault tests more robust by waiting for all Consul client
pods to be Ready, and by waiting (with a timeout) for Vault to start
responding as sealed, since the tests on GitHub runners were often
failing at that point.

Co-authored-by: Tom Proctor <tomhjp@users.noreply.github.com>
11 changed files with 142 additions and 44 deletions.

.github/workflows/acceptance.yaml (new file)

@@ -0,0 +1,34 @@
name: Acceptance Tests
on:
  push:
    branches:
      - main
  workflow_dispatch: {}
jobs:
  kind:
    strategy:
      fail-fast: false
      matrix:
        kind-k8s-version: [1.14.10, 1.19.11, 1.20.7, 1.21.2, 1.22.4]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup test tools
        uses: ./.github/workflows/setup-test-tools
      - name: Create K8s Kind Cluster
        uses: helm/kind-action@v1.2.0
        with:
          config: test/kind/config.yaml
          node_image: kindest/node:v${{ matrix.kind-k8s-version }}
      # Skip CSI tests if K8s version < 1.16.x
      - run: echo K8S_MINOR=$(kubectl version -o json | jq -r .serverVersion.minor) >> $GITHUB_ENV
      - if: ${{ env.K8S_MINOR < 16 }}
        run: echo "SKIP_CSI=true" >> $GITHUB_ENV
      - run: bats ./test/acceptance -t
        env:
          VAULT_LICENSE_CI: ${{ secrets.VAULT_LICENSE_CI }}
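The same flow can be reproduced locally against a kind cluster. A minimal sketch, assuming kind, kubectl, jq, helm, and bats are installed and using one of the matrix versions above:

# Create a multi-node kind cluster for one matrix entry (v1.22.4 here).
kind create cluster --config test/kind/config.yaml --image kindest/node:v1.22.4

# Mirror the workflow's CSI guard: skip CSI tests when the server minor version is < 16.
# (tr strips the '+' suffix some builds report in .serverVersion.minor)
K8S_MINOR=$(kubectl version -o json | jq -r .serverVersion.minor | tr -d '+')
if [ "${K8S_MINOR}" -lt 16 ]; then export SKIP_CSI=true; fi

# Run the acceptance suite; export VAULT_LICENSE_CI first if running the enterprise tests.
bats ./test/acceptance -t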

.github/workflows/setup-test-tools (new composite action)

@@ -0,0 +1,18 @@
name: Setup common testing tools
description: Install bats and python-yq
runs:
  using: "composite"
  steps:
    - uses: actions/setup-node@v2
      with:
        node-version: '14'
    - run: npm install -g bats@${BATS_VERSION}
      shell: bash
      env:
        BATS_VERSION: '1.5.0'
    - run: bats -v
      shell: bash
    - uses: actions/setup-python@v2
    - run: pip install yq
      shell: bash
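The equivalent local setup, with the versions pinned above, is roughly:

# bats test runner, same version the composite action installs via npm.
npm install -g bats@1.5.0
bats -v

# python-yq, as installed by the action above (the jq-based yq).
pip install yq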

.github/workflows/tests.yaml (new file)

@@ -0,0 +1,25 @@
name: Tests
on: [push, workflow_dispatch]
jobs:
  bats-unit-tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: ./.github/workflows/setup-test-tools
      - run: bats ./test/unit -t
  chart-verifier:
    runs-on: ubuntu-latest
    env:
      CHART_VERIFIER_VERSION: '1.2.1'
    steps:
      - uses: actions/checkout@v2
      - name: Setup test tools
        uses: ./.github/workflows/setup-test-tools
      - uses: actions/setup-go@v2
        with:
          go-version: '1.17.4'
      - run: go install github.com/redhat-certification/chart-verifier@${CHART_VERIFIER_VERSION}
      - run: bats ./test/chart -t
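Both jobs can be run locally as well; a rough sketch, assuming Go and the tools from setup-test-tools are on the PATH:

# Chart unit tests.
bats ./test/unit -t

# chart-verifier checks, pinned to the same version as the workflow.
CHART_VERIFIER_VERSION=1.2.1
go install github.com/redhat-certification/chart-verifier@${CHART_VERIFIER_VERSION}
bats ./test/chart -t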

test/acceptance/_helpers.bash

@@ -40,35 +40,37 @@ helm_install_ha() {
${BATS_TEST_DIRNAME}/../..
}
# wait for consul to be running
# wait for consul to be ready
wait_for_running_consul() {
check() {
# This requests the pod and checks whether the status is running
# and the ready state is true. If so, it outputs the name. Otherwise
# it outputs empty. Therefore, to check for success, check for nonzero
# string length.
kubectl get pods -l component=client -o json | \
jq -r '.items[0] | select(
.status.phase == "Running" and
([ .status.conditions[] | select(.type == "Ready" and .status == "True") ] | length) == 1
) | .metadata.name'
kubectl wait --for=condition=Ready --timeout=5m pod -l app=consul,component=client
}
for i in $(seq 60); do
if [ -n "$(check ${POD_NAME})" ]; then
echo "consul clients are ready."
return
wait_for_sealed_vault() {
POD_NAME=$1
check() {
sealed_status=$(kubectl exec $1 -- vault status -format=json | jq -r '.sealed')
if [ "$sealed_status" == "true" ]; then
return 0
fi
echo "Waiting for ${POD_NAME} to be ready..."
sleep 2
done
echo "consul clients never became ready."
return 1
}
# wait for a pod to be ready
for i in $(seq 60); do
if check ${POD_NAME}; then
echo "Vault on ${POD_NAME} is running."
return
fi
echo "Waiting for Vault on ${POD_NAME} to be running..."
sleep 2
done
echo "Vault on ${POD_NAME} never became running."
return 1
}
# wait for a pod to be running
wait_for_running() {
POD_NAME=$1
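Because the hunk above interleaves removed and added lines, here is the new helper logic reassembled from the added lines (a reconstruction for readability, not a verbatim copy of the file):

# wait for consul to be ready
wait_for_running_consul() {
  kubectl wait --for=condition=Ready --timeout=5m pod -l app=consul,component=client
}

# poll `vault status` on the given pod until it reports sealed=true, with a bounded retry loop
wait_for_sealed_vault() {
  POD_NAME=$1

  check() {
    sealed_status=$(kubectl exec $1 -- vault status -format=json | jq -r '.sealed')
    if [ "$sealed_status" == "true" ]; then
      return 0
    fi
    return 1
  }

  for i in $(seq 60); do
    if check ${POD_NAME}; then
      echo "Vault on ${POD_NAME} is running."
      return
    fi

    echo "Waiting for Vault on ${POD_NAME} to be running..."
    sleep 2
  done

  echo "Vault on ${POD_NAME} never became running."
  return 1
}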

@@ -2,18 +2,27 @@
load _helpers
check_skip_csi() {
if [ ! -z ${SKIP_CSI} ]; then
skip "Skipping CSI tests"
fi
}
@test "csi: testing deployment" {
check_skip_csi
cd `chart_dir`
kubectl delete namespace acceptance --ignore-not-found=true
kubectl create namespace acceptance
# Install Secrets Store CSI driver
CSI_DRIVER_VERSION=0.2.0
helm install secrets-store-csi-driver https://github.com/kubernetes-sigs/secrets-store-csi-driver/blob/v${CSI_DRIVER_VERSION}/charts/secrets-store-csi-driver-${CSI_DRIVER_VERSION}.tgz?raw=true \
CSI_DRIVER_VERSION=1.0.0
helm install secrets-store-csi-driver https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts/secrets-store-csi-driver-${CSI_DRIVER_VERSION}.tgz?raw=true \
--wait --timeout=5m \
--namespace=acceptance \
--set linux.image.pullPolicy="IfNotPresent"
--set linux.image.pullPolicy="IfNotPresent" \
--set syncSecret.enabled=true
# Install Vault and Vault provider
helm install vault \
--wait --timeout=5m \
@@ -49,6 +58,8 @@ load _helpers
# Clean up
teardown() {
check_skip_csi
if [[ ${CLEANUP:-true} == "true" ]]
then
echo "helm/pvc teardown"

@@ -15,9 +15,7 @@ load _helpers
wait_for_running "$(name_prefix)-east-0"
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "true" ]
wait_for_sealed_vault $(name_prefix)-east-0
local init_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json |
jq -r '.initialized')
@@ -50,7 +48,7 @@ load _helpers
fi
done
# Sealed, not initialized
# Unsealed, initialized
local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "false" ]
@@ -84,9 +82,7 @@ load _helpers
wait_for_running "$(name_prefix)-west-0"
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "true" ]
wait_for_sealed_vault $(name_prefix)-west-0
local init_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json |
jq -r '.initialized')
@@ -119,7 +115,7 @@ load _helpers
fi
done
# Sealed, not initialized
# Unsealed, initialized
local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "false" ]

@@ -15,9 +15,7 @@ load _helpers
wait_for_running "$(name_prefix)-east-0"
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "true" ]
wait_for_sealed_vault $(name_prefix)-east-0
local init_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json |
jq -r '.initialized')
@@ -50,7 +48,7 @@ load _helpers
fi
done
# Sealed, not initialized
# Unsealed, initialized
local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "false" ]
@@ -84,9 +82,7 @@ load _helpers
wait_for_running "$(name_prefix)-west-0"
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "true" ]
wait_for_sealed_vault $(name_prefix)-west-0
local init_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json |
jq -r '.initialized')
@@ -119,7 +115,7 @@ load _helpers
fi
done
# Sealed, not initialized
# Unsealed, initialized
local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "false" ]

@@ -11,9 +11,7 @@ load _helpers
wait_for_running $(name_prefix)-0
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "true" ]
wait_for_sealed_vault $(name_prefix)-0
local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json |
jq -r '.initialized')
@@ -112,6 +110,10 @@ setup() {
teardown() {
if [[ ${CLEANUP:-true} == "true" ]]
then
# If the test failed, print some debug output
if [[ "$BATS_ERROR_STATUS" -ne 0 ]]; then
kubectl logs -l app.kubernetes.io/name=vault
fi
helm delete vault
kubectl delete --all pvc
kubectl delete namespace acceptance --ignore-not-found=true
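The pattern in this teardown is to dump pod logs only when the test failed, and to do so before cleanup deletes the pods, so the workflow output has something to debug from; BATS_ERROR_STATUS carries the failing command's status into teardown. Isolated, it looks like:

teardown() {
  if [[ ${CLEANUP:-true} == "true" ]]; then
    # If the test failed, print some debug output before tearing anything down.
    if [[ "$BATS_ERROR_STATUS" -ne 0 ]]; then
      kubectl logs -l app.kubernetes.io/name=vault
    fi
    helm delete vault
    kubectl delete --all pvc
    kubectl delete namespace acceptance --ignore-not-found=true
  fi
}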

@@ -10,9 +10,7 @@ load _helpers
wait_for_running $(name_prefix)-0
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "true" ]
wait_for_sealed_vault $(name_prefix)-0
local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json |
jq -r '.initialized')
@@ -91,7 +89,7 @@ setup() {
helm install consul \
https://github.com/hashicorp/consul-helm/archive/v0.28.0.tar.gz \
--set 'ui.enabled=false' \
--set 'ui.enabled=false'
wait_for_running_consul
}
@@ -100,6 +98,11 @@ setup() {
teardown() {
if [[ ${CLEANUP:-true} == "true" ]]
then
# If the test failed, print some debug output
if [[ "$BATS_ERROR_STATUS" -ne 0 ]]; then
kubectl logs -l app=consul
kubectl logs -l app.kubernetes.io/name=vault
fi
helm delete vault
helm delete consul
kubectl delete --all pvc

@@ -13,9 +13,7 @@ load _helpers
wait_for_running $(name_prefix)-0
# Sealed, not initialized
local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json |
jq -r '.sealed' )
[ "${sealed_status}" == "true" ]
wait_for_sealed_vault $(name_prefix)-0
local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json |
jq -r '.initialized')

test/kind/config.yaml

@@ -5,3 +5,16 @@ nodes:
- role: worker
- role: worker
- role: worker
# These apiServer settings are included for running the CSI provider on K8s
# prior to 1.21
kubeadmConfigPatches:
- |
  apiVersion: kubeadm.k8s.io/v1beta2
  kind: ClusterConfiguration
  metadata:
    name: config
  apiServer:
    extraArgs:
      "service-account-issuer": "https://kubernetes.default.svc.cluster.local"
      "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key"
      "service-account-api-audiences": "https://kubernetes.default.svc.cluster.local"