Initial commit

Mitchell Hashimoto, 2018-08-17 22:08:03 -07:00
commit ef0d2cbcee
28 changed files with 765 additions and 0 deletions

.gitignore (new file)
@@ -0,0 +1,5 @@
.DS_Store
.terraform/
terraform.tfstate*
terraform.tfvars
values.yaml

README.md (new file)
@@ -0,0 +1,34 @@
# Consul Helm Chart
This repository contains the official HashiCorp Helm chart for installing
and configuring Consul on Kubernetes. This chart supports multiple use
cases of Consul on Kubernetes depending on the values provided.
## Prerequisites
To use the charts here, [Helm](https://helm.sh/) must be installed in your
Kubernetes cluster. Setting up Kubernetes and Helm is outside the scope of
this README. Please refer to the Kubernetes and Helm documentation.
## Testing
The Helm charts are tested in two forms: [Bats](https://github.com/bats-core/bats-core)
tests and `helm test` tests. The Bats tests exercise different Helm chart values
and verify their effect on the install. The `helm test` tests verify that a
deployed chart appears healthy.

To run the Bats tests, `kubectl` must be configured locally to authenticate to
a running Kubernetes cluster that has Helm installed. With that in place, just
run Bats:

    bats ./charts/consul/test
If the tests fail, deployed resources in the Kubernetes cluster may not
be properly cleaned up. We recommend recycling the Kubernetes cluster to
start from a clean slate.
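
For the `helm test` form, a minimal sequence looks like the sketch below. It
assumes a Helm 2 client, a release named `consul`, and that the chart's default
test values point at a usable test image:

```
helm install --name consul --wait ./charts/consul
helm test consul
```
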
**Note:** There is a Terraform configuration in the
[terraform/ directory](https://github.com/hashicorp/consul-k8s/tree/master/terraform)
that can be used to quickly bring up a GKE cluster and configure
`kubectl` and `helm` locally for test runs.

charts/consul/Chart.yaml (new file)
@@ -0,0 +1,8 @@
apiVersion: v1
name: consul
version: 0.1.0
description: Install and configure Consul on Kubernetes.
home: https://www.consul.io
sources:
- https://github.com/hashicorp/consul
- https://github.com/hashicorp/consul-k8s

@@ -0,0 +1,26 @@
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to
this (by the DNS naming spec). If release name contains chart name it will
be used as a full name.
*/}}
{{- define "consul.namePrefix" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Compute the maximum number of unavailable replicas for the PodDisruptionBudget.
This defaults to (n/2)-1 where n is the number of members of the server cluster.
*/}}
{{- define "consul.pdb.maxUnavailable" -}}
{{- if .Values.server.disruptionBudget.maxUnavailable -}}
{{ .Values.server.disruptionBudget.maxUnavailable -}}
{{- else -}}
{{- ceil (sub (div (int .Values.server.replicas) 2) 1) -}}
{{- end -}}
{{- end -}}
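
As a concrete check of the default above: with 3 server replicas the template
computes ceil((3/2) - 1) = 0, so no server may be voluntarily disrupted, while
5 replicas yield ceil((5/2) - 1) = 1. One way to confirm the rendered value
locally (a sketch, assuming the disruption budget is enabled in the chart's
default values) is:

```
helm template --set server.replicas=5 ./charts/consul | grep maxUnavailable
```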

@@ -0,0 +1,85 @@
# DaemonSet to run the Consul clients on every node.
{{- if .Values.client.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: consul
spec:
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
annotations:
"consul.hashicorp.com/connect-inject": "false"
spec:
terminationGracePeriodSeconds: 10
# Consul agents require a directory for data, even clients. The data
# is okay to be wiped though if the Pod is removed, so just use an
# emptyDir volume.
volumes:
- name: data
emptyDir: {}
containers:
- name: consul
image: "{{ .Values.client.image }}"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- "/bin/sh"
- "-ec"
- |
exec /bin/consul agent \
-advertise="${POD_IP}" \
-bind=0.0.0.0 \
-client=0.0.0.0 \
-datacenter={{ .Values.server.datacenter }} \
-data-dir=/consul/data \
{{- if and .Values.client.join (gt (len .Values.client.join) 0) }}
{{- range $value := .Values.client.join }}
-retry-join={{ $value }} \
{{- end }}
{{- else }}
{{- if .Values.server.enabled }}
{{- range $index := until (.Values.server.replicas | int) }}
-retry-join=consul-server-{{ $index }}.consul-server.${NAMESPACE}.svc \
{{- end }}
{{- end }}
{{- end }}
-domain={{ .Values.common.domain }}
volumeMounts:
- name: data
mountPath: /consul/data
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave
ports:
- containerPort: 8500
hostPort: 8500
name: http
- containerPort: 8301
name: serflan
- containerPort: 8302
name: serfwan
- containerPort: 8300
name: server
- containerPort: 8600
name: dns
resources:
{{ toYaml .Values.server.resources | indent 12 }}
{{- end }}
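
The clients above join either an operator-supplied address list (`client.join`)
or, by default, the chart's own server StatefulSet pods. A hedged example of
pointing the clients at an existing Consul cluster instead (the addresses are
placeholders):

```
helm install --name consul \
  --set 'client.join={10.0.0.10,10.0.0.11,10.0.0.12}' \
  ./charts/consul
```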

@@ -0,0 +1,69 @@
# The deployment for running the Connect sidecar injector
{{- if .Values.connectInject.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: consul-connect-injector-webhook-deployment
labels:
app: consul-connect-injector
spec:
replicas: 1
selector:
matchLabels:
app: consul-connect-injector
template:
metadata:
labels:
app: consul-connect-injector
spec:
containers:
- name: sidecar-injector
image: us.gcr.io/mitchellh-k8s/consul-k8s:latest
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- "/bin/sh"
- "-ec"
- |
consul-k8s inject \
-default-inject={{ .Values.connectInject.default }} \
-listen=:8080 \
{{- if .Values.connectInject.certs.secretName }}
-tls-cert-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.certName }} \
-tls-key-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.keyName }}
{{- else }}
-tls-auto=consul-connect-injector-cfg \
-tls-auto-hosts=consul-connect-injector-svc,consul-connect-injector-svc.${NAMESPACE},consul-connect-injector-svc.${NAMESPACE}.svc
{{- end }}
livenessProbe:
tcpSocket:
port: 8080
failureThreshold: 2
initialDelaySeconds: 1
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /health/ready
port: 8080
scheme: HTTPS
failureThreshold: 2
initialDelaySeconds: 2
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 5
{{- if .Values.connectInject.certs.secretName }}
volumeMounts:
- name: certs
mountPath: /etc/connect-injector/certs
readOnly: true
volumes:
- name: certs
secret:
secretName: {{ .Values.connectInject.certs.secretName }}
{{- end }}
{{- end }}
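
When `connectInject.certs.secretName` is set, the injector serves TLS from a
Kubernetes Secret instead of generating certificates with `-tls-auto`. A sketch
of wiring that up, assuming locally generated `tls.crt`/`tls.key` files and an
arbitrary secret name:

```
kubectl create secret generic injector-tls --from-file=tls.crt --from-file=tls.key
helm install --name consul ./charts/consul \
  --set connectInject.enabled=true \
  --set connectInject.certs.secretName=injector-tls \
  --set connectInject.certs.certName=tls.crt \
  --set connectInject.certs.keyName=tls.key
```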

@@ -0,0 +1,26 @@
# The MutatingWebhookConfiguration to enable the Connect injector.
{{- if and .Values.connectInject.enabled .Values.connectInject.caBundle }}
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: consul-connect-injector-cfg
labels:
app: consul-connect-injector
webhooks:
- name: consul-connect-injector.consul.hashicorp.com
clientConfig:
service:
name: consul-connect-injector-svc
namespace: {{ .Release.Namespace }}
path: "/mutate"
caBundle: {{ .Values.connectInject.caBundle }}
rules:
- operations: [ "CREATE" ]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]
{{- if .Values.connectInject.namespaceSelector }}
namespaceSelector:
{{ tpl .Values.connectInject.namespaceSelector . | indent 6 }}
{{- end }}
{{- end }}
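
The webhook is only rendered when `connectInject.caBundle` is provided; it must
be the base64-encoded CA certificate that signed the injector's serving
certificate. A hedged way to pass it at install time, assuming that CA lives in
a local `ca.crt` file:

```
helm install --name consul ./charts/consul \
  --set connectInject.enabled=true \
  --set connectInject.caBundle="$(base64 < ca.crt | tr -d '\n')"
```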

@@ -0,0 +1,16 @@
# The service for the Connect sidecar injector
{{- if .Values.connectInject.enabled }}
apiVersion: v1
kind: Service
metadata:
name: consul-connect-injector-svc
labels:
app: consul-connect-injector
spec:
ports:
- port: 443
targetPort: 8080
selector:
app: consul-connect-injector
{{- end }}

@@ -0,0 +1,10 @@
# ConfigMap holding extra Consul server configuration supplied via values.
{{- if .Values.server.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: consul-server-config
data:
extra-from-values.json: |-
{{ tpl .Values.server.extraConfig . | indent 4 }}
{{- end }}
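
The `server.extraConfig` value is rendered verbatim into
`extra-from-values.json`, which the servers pick up through
`-config-dir=/consul/config`. A small sketch of supplying it from a values
file (`log_level` is just an illustrative Consul setting):

```
cat > extra.yaml <<'EOF'
server:
  extraConfig: |
    {"log_level": "DEBUG"}
EOF
helm install --name consul -f extra.yaml ./charts/consul
```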

@@ -0,0 +1,13 @@
# PodDisruptionBudget to prevent degrading the server cluster through
# voluntary cluster changes.
{{- if and .Values.server.enabled .Values.server.disruptionBudget.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: consul-pdb
spec:
maxUnavailable: {{ template "consul.pdb.maxUnavailable" . }}
selector:
matchLabels:
app: consul-server
{{- end }}

@@ -0,0 +1,51 @@
# Headless service for Consul server DNS entries. This service should only
# point to Consul servers. For access to an agent, one should assume that
# the agent is installed locally on the node and the NODE_IP should be used.
# If the node can't run a Consul agent, then this service can be used to
# communicate directly with a server agent.
{{- if .Values.server.enabled }}
apiVersion: v1
kind: Service
metadata:
name: consul-server
labels:
name: consul-server
annotations:
# This must be set in addition to publishNotReadyAddresses due
# to an open issue where it may not work:
# https://github.com/kubernetes/kubernetes/issues/58662
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None
# We want the servers to become available even if they're not ready
# since this DNS is also used for join operations.
publishNotReadyAddresses: true
ports:
- name: http
port: 8500
targetPort: 8500
- name: serflan-tcp
protocol: "TCP"
port: 8301
targetPort: 8301
- name: serflan-udp
protocol: "UDP"
port: 8301
targetPort: 8301
- name: serfwan-tcp
protocol: "TCP"
port: 8302
targetPort: 8302
- name: serfwan-udp
protocol: "UDP"
port: 8302
targetPort: 8302
- name: server
port: 8300
targetPort: 8300
- name: dns
port: 8600
targetPort: 8600
selector:
app: consul-server
{{- end }}
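
Because this service is headless and publishes not-ready addresses, each server
gets a stable DNS name of the form `consul-server-<N>.consul-server.<namespace>.svc`,
which is exactly what the agents use for `-retry-join`. A quick in-cluster
sanity check (a sketch; the `default` namespace and the presence of `nslookup`
in the image are assumptions):

```
kubectl exec consul-server-0 -- nslookup consul-server.default.svc.cluster.local
```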

@@ -0,0 +1,126 @@
# StatefulSet to run the actual Consul server cluster.
{{- if .Values.server.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul-server
spec:
selector:
matchLabels:
app: consul-server
serviceName: consul-server
podManagementPolicy: Parallel
replicas: {{ .Values.server.replicas }}
{{- if (gt (int .Values.server.updatePartition) 0) }}
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: {{ .Values.server.updatePartition }}
{{- end }}
template:
metadata:
labels:
app: consul-server
annotations:
"consul.hashicorp.com/connect-inject": "false"
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul-server
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
securityContext:
fsGroup: 1000
volumes:
- name: config
configMap:
name: consul-server-config
containers:
- name: consul
image: "{{ .Values.server.image }}"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- "/bin/sh"
- "-ec"
- |
exec /bin/consul agent \
-advertise="${POD_IP}" \
-bind=0.0.0.0 \
-bootstrap-expect={{ .Values.server.bootstrapExpect }} \
-client=0.0.0.0 \
-config-dir=/consul/config \
-datacenter={{ .Values.server.datacenter }} \
-data-dir=/consul/data \
-domain={{ .Values.common.domain }} \
{{- if .Values.server.connect }}
-hcl="connect { enabled = true }" \
{{- end }}
{{- if .Values.ui.enabled }}
-ui \
{{- end }}
{{- range $index := until (.Values.server.replicas | int) }}
-retry-join=consul-server-{{ $index }}.consul-server.${NAMESPACE}.svc \
{{- end }}
-server
volumeMounts:
- name: data
mountPath: /consul/data
- name: config
mountPath: /consul/config
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave
ports:
- containerPort: 8500
name: http
- containerPort: 8301
name: serflan
- containerPort: 8302
name: serfwan
- containerPort: 8300
name: server
- containerPort: 8600
name: dns
readinessProbe:
# NOTE(mitchellh): when our HTTP status endpoints support the
# proper status codes, we should switch to that. This is temporary.
exec:
command:
- "/bin/sh"
- "-ec"
- |
curl http://127.0.0.1:8500/v1/status/leader 2>/dev/null | \
grep -E '".+"'
failureThreshold: 2
initialDelaySeconds: 5
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 5
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.server.storage }}
{{- end }}
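
The `server.updatePartition` value drives a partitioned `RollingUpdate`: only
pods with an ordinal greater than or equal to the partition are replaced, so an
upgrade can be walked across the servers one at a time. A hedged sketch for a
3-server cluster:

```
helm upgrade consul ./charts/consul --set server.updatePartition=2
# verify consul-server-2 rejoined and the cluster has a leader, then continue
helm upgrade consul ./charts/consul --set server.updatePartition=1
helm upgrade consul ./charts/consul --set server.updatePartition=0
```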

@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "consul.namePrefix" . }}-tests
data:
run.sh: |-
@test "Testing Consul cluster has quorum" {
[ `kubectl exec {{ template "consul.namePrefix" . }}-server-0 consul members --namespace={{ .Release.Namespace }} | grep server | wc -l` -ge "3" ]
}

@@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ .Release.Name }}-test-{{ randAlphaNum 5 | lower }}"
annotations:
"helm.sh/hook": test-success
spec:
initContainers:
- name: test-framework
image: dduportal/bats:0.4.0
command:
- "bash"
- "-c"
- |
set -ex
# copy bats to tools dir
cp -R /usr/local/libexec/ /tools/bats/
volumeMounts:
- mountPath: /tools
name: tools
containers:
- name: {{ .Release.Name }}-test
image: {{ .Values.test.image }}:{{ .Values.test.imageTag }}
command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
volumeMounts:
- mountPath: /tests
name: tests
readOnly: true
- mountPath: /tools
name: tools
volumes:
- name: tests
configMap:
name: {{ template "consul.namePrefix" . }}-tests
- name: tools
emptyDir: {}
restartPolicy: Never

@@ -0,0 +1,23 @@
# Service for the Consul UI. This fronts the HTTP port (8500) of the server
# pods on port 80 so the UI can be reached through a normal Kubernetes Service,
# optionally with a custom service type (for example, LoadBalancer).
{{- if and .Values.server.enabled .Values.ui.enabled .Values.ui.service }}
apiVersion: v1
kind: Service
metadata:
name: consul-ui
labels:
name: consul-ui
spec:
selector:
app: consul-server
ports:
- name: http
port: 80
targetPort: 8500
{{- if .Values.ui.serviceType }}
type: {{ .Values.ui.serviceType }}
{{- end }}
{{- end }}
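
If no external `ui.serviceType` is set, the UI can still be reached by
port-forwarding to the service (a sketch; requires a `kubectl` version that
supports forwarding to Services):

```
kubectl port-forward svc/consul-ui 8500:80
# then browse to http://localhost:8500/ui
```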

@@ -0,0 +1,45 @@
# name_prefix returns the prefix of the resources within Kubernetes.
name_prefix() {
printf "consul"
}
# helm_install installs the Consul chart.
helm_install() {
helm install --name consul --wait ${BATS_TEST_DIRNAME}/..
}
# helm_delete deletes the Consul chart and all resources.
helm_delete() {
helm delete --purge consul
kubectl delete --all pvc
}
# wait_for_ready waits until the given pod is running and ready.
wait_for_ready() {
POD_NAME=$1
check() {
# This requests the pod and checks whether the status is running
# and the ready state is true. If so, it outputs the name. Otherwise
# it outputs empty. Therefore, to check for success, check for nonzero
# string length.
kubectl get pods $1 -o json | \
jq -r 'select(
.status.phase == "Running" and
([ .status.conditions[] | select(.type == "Ready" and .status == "True") ] | length) == 1
) | .metadata.namespace + "/" + .metadata.name'
}
for i in $(seq 30); do
if [ -n "$(check ${POD_NAME})" ]; then
echo "${POD_NAME} is ready."
return
fi
echo "Waiting for ${POD_NAME} to be ready..."
sleep 2
done
echo "${POD_NAME} never became ready."
exit 1
}

@@ -0,0 +1,17 @@
#!/usr/bin/env bats
load _helpers
@test "server: default, comes up healthy" {
helm_install
wait_for_ready $(name_prefix)-server-0
# Verify there are three servers
local server_count=$(kubectl exec "$(name_prefix)-server-0" consul members |
grep server |
wc -l)
[ "${server_count}" -eq "3" ]
# Clean up
helm_delete
}

terraform/README.md (new file)
@@ -0,0 +1,23 @@
# Terraform
This folder contains a Terraform configuration that can be used to set up
an example cluster. These are not meant to be production-ready modules for
using Consul with Kubernetes.

The prerequisites for Terraform are:
* Google Cloud authentication. See [Google Application Default Credentials](https://cloud.google.com/docs/authentication/production). You may also reuse your `gcloud` credentials by exposing them as application defaults by running `gcloud auth application-default login`.
* `gcloud` installed and configured locally with GKE components.
* The following programs available on the PATH: `kubectl`, `helm`, `grep`, `xargs`.
With that available, run the following:
```
$ terraform init
$ terraform apply
```
The apply will ask you for the name of the project in which to set up the
cluster. After this, everything will be set up, your local `kubectl`
credentials will be configured, and you may use `helm` directly.
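
To skip the interactive prompt, the project can be supplied on the command line
(the project ID below is a placeholder):

```
terraform init
terraform apply -var "project=my-gcp-project" -auto-approve
```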

terraform/main.tf (new file)
@@ -0,0 +1,9 @@
module "gke" {
source = "./modules/gke"
project = "${var.project}"
}
module "helm" {
source = "./modules/helm"
trigger = "${module.gke.cluster_id}"
}

@@ -0,0 +1,9 @@
# Terraform Modules
This directory contains reusable [Terraform](https://www.terraform.io) modules
for various tasks related to Consul and Kubernetes, from spinning up a demo
cluster to running tests.
These modules are used by our own automated systems for verifying the
functionality of the Consul and Kubernetes components. These modules aren't
meant to be production-ready deployment modules.

@@ -0,0 +1,22 @@
# GKE Cluster Setup
This module creates a GKE cluster for running and testing the Consul and
Kubernetes integrations. The GKE cluster is an opinionated setup and this
module is not meant to be a generic GKE module. This module also configures
`kubectl` credentials.
After this module completes, a GKE cluster is created and `kubectl` is
configured such that you can immediately verify the Kubernetes cluster:

    kubectl get componentstatus
**WARNING:** This module will create resources that cost money. This does
not use free tier resources.
## Requirements
* Google Cloud authentication. See [Google Application Default Credentials](https://cloud.google.com/docs/authentication/production). You may also reuse your `gcloud` credentials by exposing them as application defaults by running `gcloud auth application-default login`.
* `gcloud` installed and configured locally with GKE components and available on the PATH.
* `kubectl` installed locally and available on the PATH.
* A Google Cloud Project with GKE and billing activated.
* Unix-like environment that supports piping, `grep`, and `xargs`.

@@ -0,0 +1,39 @@
provider "google" {
project = "${var.project}"
}
resource "random_id" "suffix" {
byte_length = 4
}
resource "google_container_cluster" "cluster" {
name = "consul-k8s-${random_id.suffix.dec}"
project = "${var.project}"
enable_legacy_abac = true
initial_node_count = 5
zone = "${var.zone}"
min_master_version = "${var.k8s_version}"
node_version = "${var.k8s_version}"
}
resource "null_resource" "kubectl" {
triggers {
cluster = "${google_container_cluster.cluster.id}"
}
# On creation, we want to setup the kubectl credentials. The easiest way
# to do this is to shell out to gcloud.
provisioner "local-exec" {
command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}"
}
# On destroy we want to try to clean up the kubectl credentials. This
# might fail if the credentials are already cleaned up or something so we
# want this to continue on failure. Generally, this works just fine since
# it only operates on local data.
provisioner "local-exec" {
when = "destroy"
on_failure = "continue"
command = "kubectl config get-clusters | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-cluster"
}
}

@@ -0,0 +1,4 @@
output "cluster_id" {
value = "${google_container_cluster.cluster.id}"
depends_on = ["null_resource.kubectl"]
}

@@ -0,0 +1,16 @@
variable "k8s_version" {
default = "1.10.5-gke.4"
description = "The K8S version to use for both master and nodes."
}
variable "project" {
description = <<EOF
Google Cloud Project to launch resources in. This project must have GKE
enabled and billing activated.
EOF
}
variable "zone" {
default = "us-central1-a"
description = "The zone to launch all the GKE nodes in."
}

@@ -0,0 +1,16 @@
locals {
service_account_path = "${path.module}/service-account.yaml"
}
resource "null_resource" "service_account" {
triggers {
cluster_id = "${var.trigger}"
}
provisioner "local-exec" {
command = <<EOF
kubectl apply -f '${local.service_account_path}'
helm init --service-account helm
EOF
}
}

@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: helm
namespace: kube-system

@@ -0,0 +1,3 @@
variable "trigger" {
description = "When this string changes, Helm is reinstalled. This should be set to something unique to the cluster installation."
}

terraform/variables.tf (new file)
@@ -0,0 +1,6 @@
variable "project" {
description = <<EOF
Google Cloud Project to launch resources in. This project must have GKE
enabled and billing activated.
EOF
}