commit ef0d2cbceec8c7b62388718c4f5f6cacc9275503
Author: Mitchell Hashimoto
Date:   Fri Aug 17 22:08:03 2018 -0700

    Initial commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..79f6cb2
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+.DS_Store
+.terraform/
+terraform.tfstate*
+terraform.tfvars
+values.yaml
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e87106b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,34 @@
+# Consul Helm Chart
+
+This repository contains the official HashiCorp Helm chart for installing
+and configuring Consul on Kubernetes. This chart supports multiple use
+cases of Consul on Kubernetes depending on the values provided.
+
+## Prerequisites
+
+To use the charts here, [Helm](https://helm.sh/) must be installed in your
+Kubernetes cluster. Setting up Kubernetes and Helm is outside the scope
+of this README. Please refer to the Kubernetes and Helm documentation.
+
+## Testing
+
+The Helm charts are tested in two forms: [Bats](https://github.com/bats-core/bats-core)
+tests and `helm test` tests. The Bats tests verify that changing Helm chart
+values has the intended effect on the install. The `helm test` tests verify
+that a deployed chart appears healthy.
+
+To run the Bats tests, `kubectl` must be configured locally to be authenticated
+to a running Kubernetes cluster with Helm installed. With that in place,
+just run bats:
+
+    bats ./charts/consul/test
+
+If the tests fail, deployed resources in the Kubernetes cluster may not
+be properly cleaned up. We recommend recycling the Kubernetes cluster to
+start from a clean slate.
+
+**Note:** There is a Terraform configuration in the
+[terraform/ directory](https://github.com/hashicorp/consul-k8s/tree/master/terraform)
+that can be used to quickly bring up a GKE cluster and configure
+`kubectl` and `helm` locally. This can be used to quickly spin up a test
+cluster.
diff --git a/charts/consul/Chart.yaml b/charts/consul/Chart.yaml
new file mode 100644
index 0000000..15fefee
--- /dev/null
+++ b/charts/consul/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+name: consul
+version: 0.1.0
+description: Install and configure Consul on Kubernetes.
+home: https://www.consul.io
+sources:
+  - https://github.com/hashicorp/consul
+  - https://github.com/hashicorp/consul-k8s
diff --git a/charts/consul/templates/_helpers.tpl b/charts/consul/templates/_helpers.tpl
new file mode 100644
index 0000000..5126558
--- /dev/null
+++ b/charts/consul/templates/_helpers.tpl
@@ -0,0 +1,26 @@
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to
+this (by the DNS naming spec). If the release name contains the chart name,
+it will be used as the full name.
+*/}}
+{{- define "consul.namePrefix" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compute the maximum number of unavailable replicas for the PodDisruptionBudget.
+This defaults to (n/2)-1 where n is the number of members of the server cluster.
+*/}}
+{{- define "consul.pdb.maxUnavailable" -}}
+{{- if .Values.server.disruptionBudget.maxUnavailable -}}
+{{ .Values.server.disruptionBudget.maxUnavailable -}}
+{{- else -}}
+{{- ceil (sub (div (int .Values.server.replicas) 2) 1) -}}
+{{- end -}}
+{{- end -}}
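The `consul.pdb.maxUnavailable` helper above is easy to sanity-check by rendering the disruption-budget template (added later in this commit) at a few replica counts. A sketch using Helm 2-era `helm template -x`, run from inside `charts/consul`; the chart ships no `values.yaml`, so exactly which `--set` keys are required is an assumption:

```
# Hypothetical spot-check of the (n/2)-1 default.
for n in 3 5 7; do
  helm template . -x templates/server-disruptionbudget.yaml \
    --set server.enabled=true \
    --set server.disruptionBudget.enabled=true \
    --set server.replicas=$n | grep maxUnavailable
done
# Expected: 0, 1, and 2. For three servers the budget is 0, so voluntary
# evictions are blocked entirely and raft quorum (2 of 3) is never risked.
```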
+*/}} +{{- define "consul.pdb.maxUnavailable" -}} +{{- if .Values.server.disruptionBudget.maxUnavailable -}} +{{ .Values.server.disruptionBudget.maxUnavailable -}} +{{- else -}} +{{- ceil (sub (div (int .Values.server.replicas) 2) 1) -}} +{{- end -}} +{{- end -}} diff --git a/charts/consul/templates/client-daemonset.yaml b/charts/consul/templates/client-daemonset.yaml new file mode 100644 index 0000000..1005284 --- /dev/null +++ b/charts/consul/templates/client-daemonset.yaml @@ -0,0 +1,85 @@ +# DaemonSet to run the Consul clients on every node. +{{- if .Values.client.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: consul +spec: + selector: + matchLabels: + app: consul + template: + metadata: + labels: + app: consul + annotations: + "consul.hashicorp.com/connect-inject": "false" + spec: + terminationGracePeriodSeconds: 10 + + # Consul agents require a directory for data, even clients. The data + # is okay to be wiped though if the Pod is removed, so just use an + # emptyDir volume. + volumes: + - name: data + emptyDir: {} + + containers: + - name: consul + image: "{{ .Values.client.image }}" + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: + - "/bin/sh" + - "-ec" + - | + exec /bin/consul agent \ + -advertise="${POD_IP}" \ + -bind=0.0.0.0 \ + -client=0.0.0.0 \ + -datacenter={{ .Values.server.datacenter }} \ + -data-dir=/consul/data \ + {{- if (.Values.client.join) and (gt (len .Values.client.join) 0) }} + {{- range $value := .Values.client.join }} + -retry-join={{ $value }} \ + {{- end }} + {{- else }} + {{- if .Values.server.enabled }} + {{- range $index := until (.Values.server.replicas | int) }} + -retry-join=consul-server-{{ $index }}.consul-server.${NAMESPACE}.svc \ + {{- end }} + {{- end }} + {{- end }} + -domain={{ .Values.common.domain }} + volumeMounts: + - name: data + mountPath: /consul/data + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - consul leave + ports: + - containerPort: 8500 + hostPort: 8500 + name: http + - containerPort: 8301 + name: serflan + - containerPort: 8302 + name: serfwan + - containerPort: 8300 + name: server + - containerPort: 8600 + name: dns + resources: +{{ toYaml .Values.server.resources | indent 12 }} +{{- end }} diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml new file mode 100644 index 0000000..2d9ccce --- /dev/null +++ b/charts/consul/templates/connect-inject-deployment.yaml @@ -0,0 +1,69 @@ +# The deployment for running the Connect sidecar injector +{{- if .Values.connectInject.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul-connect-injector-webhook-deployment + labels: + app: consul-connect-injector +spec: + replicas: 1 + selector: + matchLabels: + app: consul-connect-injector + template: + metadata: + labels: + app: consul-connect-injector + spec: + containers: + - name: sidecar-injector + image: us.gcr.io/mitchellh-k8s/consul-k8s:latest + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s inject \ + -default-inject={{ .Values.connectInject.default }} \ + -listen=:8080 \ +{{- if .Values.connectInject.certs.secretName }} + -tls-cert-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.certName }} + -tls-key-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.keyName }} +{{- else }} + 
diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml
new file mode 100644
index 0000000..2d9ccce
--- /dev/null
+++ b/charts/consul/templates/connect-inject-deployment.yaml
@@ -0,0 +1,69 @@
+# The Deployment for running the Connect sidecar injector.
+{{- if .Values.connectInject.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: consul-connect-injector-webhook-deployment
+  labels:
+    app: consul-connect-injector
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: consul-connect-injector
+  template:
+    metadata:
+      labels:
+        app: consul-connect-injector
+    spec:
+      containers:
+        - name: sidecar-injector
+          image: us.gcr.io/mitchellh-k8s/consul-k8s:latest
+          env:
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          command:
+            - "/bin/sh"
+            - "-ec"
+            - |
+              consul-k8s inject \
+                -default-inject={{ .Values.connectInject.default }} \
+                -listen=:8080 \
+{{- if .Values.connectInject.certs.secretName }}
+                -tls-cert-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.certName }} \
+                -tls-key-file=/etc/connect-injector/certs/{{ .Values.connectInject.certs.keyName }}
+{{- else }}
+                -tls-auto=consul-connect-injector-cfg \
+                -tls-auto-hosts=consul-connect-injector-svc,consul-connect-injector-svc.${NAMESPACE},consul-connect-injector-svc.${NAMESPACE}.svc
+{{- end }}
+          livenessProbe:
+            tcpSocket:
+              port: 8080
+            failureThreshold: 2
+            initialDelaySeconds: 1
+            periodSeconds: 2
+            successThreshold: 1
+            timeoutSeconds: 5
+          readinessProbe:
+            httpGet:
+              path: /health/ready
+              port: 8080
+              scheme: HTTPS
+            failureThreshold: 2
+            initialDelaySeconds: 2
+            periodSeconds: 2
+            successThreshold: 1
+            timeoutSeconds: 5
+{{- if .Values.connectInject.certs.secretName }}
+          volumeMounts:
+            - name: certs
+              mountPath: /etc/connect-injector/certs
+              readOnly: true
+      volumes:
+        - name: certs
+          secret:
+            secretName: {{ .Values.connectInject.certs.secretName }}
+{{- end }}
+{{- end }}
diff --git a/charts/consul/templates/connect-inject-mutatingwebhook.yaml b/charts/consul/templates/connect-inject-mutatingwebhook.yaml
new file mode 100644
index 0000000..c49d3f0
--- /dev/null
+++ b/charts/consul/templates/connect-inject-mutatingwebhook.yaml
@@ -0,0 +1,26 @@
+# The MutatingWebhookConfiguration to enable the Connect injector.
+{{- if and .Values.connectInject.enabled .Values.connectInject.caBundle }}
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: MutatingWebhookConfiguration
+metadata:
+  name: consul-connect-injector-cfg
+  labels:
+    app: consul-connect-injector
+webhooks:
+  - name: consul-connect-injector.consul.hashicorp.com
+    clientConfig:
+      service:
+        name: consul-connect-injector-svc
+        namespace: default
+        path: "/mutate"
+      caBundle: {{ .Values.connectInject.caBundle }}
+    rules:
+      - operations: [ "CREATE" ]
+        apiGroups: [""]
+        apiVersions: ["v1"]
+        resources: ["pods"]
+{{- if .Values.connectInject.namespaceSelector }}
+    namespaceSelector:
+{{ tpl .Values.connectInject.namespaceSelector . | indent 6 }}
+{{- end }}
+{{- end }}
diff --git a/charts/consul/templates/connect-inject-service.yaml b/charts/consul/templates/connect-inject-service.yaml
new file mode 100644
index 0000000..cafe095
--- /dev/null
+++ b/charts/consul/templates/connect-inject-service.yaml
@@ -0,0 +1,16 @@
+# The Service for the Connect sidecar injector.
+{{- if .Values.connectInject.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: consul-connect-injector-svc
+  labels:
+    app: consul-connect-injector
+spec:
+  ports:
+    - port: 443
+      targetPort: 8080
+  selector:
+    app: consul-connect-injector
+{{- end }}
+
diff --git a/charts/consul/templates/server-config-configmap.yaml b/charts/consul/templates/server-config-configmap.yaml
new file mode 100644
index 0000000..ef44d80
--- /dev/null
+++ b/charts/consul/templates/server-config-configmap.yaml
@@ -0,0 +1,10 @@
+# ConfigMap with extra configuration for the Consul server cluster.
+{{- if .Values.server.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: consul-server-config
+data:
+  extra-from-values.json: |-
+{{ tpl .Values.server.extraConfig . | indent 4 }}
+{{- end }}
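`server.extraConfig` is run through `tpl` and written verbatim into `extra-from-values.json`, which the server StatefulSet below mounts at `/consul/config` and picks up via `-config-dir`. A sketch of rendering it in isolation (the values file is hypothetical and `-x` is Helm 2 syntax):

```
cat > extra.yaml <<'EOF'
server:
  enabled: true
  extraConfig: |
    {"log_level": "DEBUG"}
EOF
helm template . -x templates/server-config-configmap.yaml -f extra.yaml
# The JSON appears, indented four spaces, under data["extra-from-values.json"].
```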
diff --git a/charts/consul/templates/server-disruptionbudget.yaml b/charts/consul/templates/server-disruptionbudget.yaml
new file mode 100644
index 0000000..247d791
--- /dev/null
+++ b/charts/consul/templates/server-disruptionbudget.yaml
@@ -0,0 +1,13 @@
+# PodDisruptionBudget to prevent degrading the server cluster through
+# voluntary cluster changes.
+{{- if and .Values.server.enabled .Values.server.disruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: consul-pdb
+spec:
+  maxUnavailable: {{ template "consul.pdb.maxUnavailable" . }}
+  selector:
+    matchLabels:
+      app: consul-server
+{{- end }}
diff --git a/charts/consul/templates/server-service.yaml b/charts/consul/templates/server-service.yaml
new file mode 100644
index 0000000..78b30ce
--- /dev/null
+++ b/charts/consul/templates/server-service.yaml
@@ -0,0 +1,51 @@
+# Headless service for Consul server DNS entries. This service should only
+# point to Consul servers. For access to an agent, one should assume that
+# the agent is installed locally on the node and the NODE_IP should be used.
+# If the node can't run a Consul agent, then this service can be used to
+# communicate directly to a server agent.
+{{- if .Values.server.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: consul-server
+  labels:
+    name: consul-server
+  annotations:
+    # This must be set in addition to publishNotReadyAddresses due
+    # to an open issue where it may not work:
+    # https://github.com/kubernetes/kubernetes/issues/58662
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  clusterIP: None
+  # We want the servers to become available even if they're not ready
+  # since this DNS is also used for join operations.
+  publishNotReadyAddresses: true
+  ports:
+    - name: http
+      port: 8500
+      targetPort: 8500
+    - name: serflan-tcp
+      protocol: "TCP"
+      port: 8301
+      targetPort: 8301
+    - name: serflan-udp
+      protocol: "UDP"
+      port: 8301
+      targetPort: 8301
+    - name: serfwan-tcp
+      protocol: "TCP"
+      port: 8302
+      targetPort: 8302
+    - name: serfwan-udp
+      protocol: "UDP"
+      port: 8302
+      targetPort: 8302
+    - name: server
+      port: 8300
+      targetPort: 8300
+    - name: dns
+      port: 8600
+      targetPort: 8600
+  selector:
+    app: consul-server
+{{- end }}
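Because this service is headless (`clusterIP: None`) and publishes not-ready addresses, each server pod gets a stable DNS name of the form `consul-server-N.consul-server.<namespace>.svc`, which is exactly what the `-retry-join` flags in the client DaemonSet above and the server StatefulSet below rely on while the cluster is still bootstrapping. A quick way to inspect those records (the `default` namespace and the busybox image are assumptions):

```
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- \
  nslookup consul-server.default.svc.cluster.local
# Expect one A record per server pod, present even before readiness passes.
```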
diff --git a/charts/consul/templates/server-statefulset.yaml b/charts/consul/templates/server-statefulset.yaml
new file mode 100644
index 0000000..e7df2aa
--- /dev/null
+++ b/charts/consul/templates/server-statefulset.yaml
@@ -0,0 +1,126 @@
+# StatefulSet to run the actual Consul server cluster.
+{{- if .Values.server.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: consul-server
+spec:
+  selector:
+    matchLabels:
+      app: consul-server
+  serviceName: consul-server
+  podManagementPolicy: Parallel
+  replicas: {{ .Values.server.replicas }}
+  {{- if (gt (int .Values.server.updatePartition) 0) }}
+  updateStrategy:
+    type: RollingUpdate
+    rollingUpdate:
+      partition: {{ .Values.server.updatePartition }}
+  {{- end }}
+  template:
+    metadata:
+      labels:
+        app: consul-server
+      annotations:
+        "consul.hashicorp.com/connect-inject": "false"
+    spec:
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - consul-server
+              topologyKey: kubernetes.io/hostname
+      terminationGracePeriodSeconds: 10
+      securityContext:
+        fsGroup: 1000
+      volumes:
+        - name: config
+          configMap:
+            name: consul-server-config
+      containers:
+        - name: consul
+          image: "{{ .Values.server.image }}"
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          command:
+            - "/bin/sh"
+            - "-ec"
+            - |
+              exec /bin/consul agent \
+                -advertise="${POD_IP}" \
+                -bind=0.0.0.0 \
+                -bootstrap-expect={{ .Values.server.bootstrapExpect }} \
+                -client=0.0.0.0 \
+                -config-dir=/consul/config \
+                -datacenter={{ .Values.server.datacenter }} \
+                -data-dir=/consul/data \
+                -domain={{ .Values.common.domain }} \
+                {{- if .Values.server.connect }}
+                -hcl="connect { enabled = true }" \
+                {{- end }}
+                {{- if .Values.ui.enabled }}
+                -ui \
+                {{- end }}
+                {{- range $index := until (.Values.server.replicas | int) }}
+                -retry-join=consul-server-{{ $index }}.consul-server.${NAMESPACE}.svc \
+                {{- end }}
+                -server
+          volumeMounts:
+            - name: data
+              mountPath: /consul/data
+            - name: config
+              mountPath: /consul/config
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                  - /bin/sh
+                  - -c
+                  - consul leave
+          ports:
+            - containerPort: 8500
+              name: http
+            - containerPort: 8301
+              name: serflan
+            - containerPort: 8302
+              name: serfwan
+            - containerPort: 8300
+              name: server
+            - containerPort: 8600
+              name: dns
+          readinessProbe:
+            # NOTE(mitchellh): when our HTTP status endpoints support the
+            # proper status codes, we should switch to that. This is temporary.
+            exec:
+              command:
+                - "/bin/sh"
+                - "-ec"
+                - |
+                  curl http://127.0.0.1:8500/v1/status/leader 2>/dev/null | \
+                  grep -E '".+"'
+            failureThreshold: 2
+            initialDelaySeconds: 5
+            periodSeconds: 3
+            successThreshold: 1
+            timeoutSeconds: 5
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: {{ .Values.server.storage }}
+{{- end }}
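The `updatePartition` guard above enables staged server upgrades: pods with an ordinal greater than or equal to the partition keep their old spec, so lowering the partition one step at a time rolls the servers one by one. A sketch of that flow for a three-server cluster (the release name and image tag are hypothetical):

```
for part in 2 1 0; do
  helm upgrade consul ./charts/consul \
    --set server.image=consul:1.2.3 \
    --set server.updatePartition=$part
  kubectl rollout status statefulset/consul-server
  # Confirm quorum is intact before moving to the next server.
  kubectl exec consul-server-0 consul operator raft list-peers
done
```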
diff --git a/charts/consul/templates/tests/test-config.yaml b/charts/consul/templates/tests/test-config.yaml
new file mode 100644
index 0000000..ddfe6f6
--- /dev/null
+++ b/charts/consul/templates/tests/test-config.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "consul.namePrefix" . }}-tests
+data:
+  run.sh: |-
+    @test "Testing Consul cluster has quorum" {
+      [ `kubectl exec --namespace={{ .Release.Namespace }} {{ template "consul.namePrefix" . }}-server-0 consul members | grep server | wc -l` -ge "3" ]
+    }
diff --git a/charts/consul/templates/tests/test-runner.yaml b/charts/consul/templates/tests/test-runner.yaml
new file mode 100644
index 0000000..270a993
--- /dev/null
+++ b/charts/consul/templates/tests/test-runner.yaml
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ .Release.Name }}-test-{{ randAlphaNum 5 | lower }}"
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  initContainers:
+    - name: test-framework
+      image: dduportal/bats:0.4.0
+      command:
+        - "bash"
+        - "-c"
+        - |
+          set -ex
+          # copy bats to tools dir
+          cp -R /usr/local/libexec/ /tools/bats/
+      volumeMounts:
+        - mountPath: /tools
+          name: tools
+  containers:
+    - name: {{ .Release.Name }}-test
+      image: {{ .Values.test.image }}:{{ .Values.test.imageTag }}
+      command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
+      volumeMounts:
+        - mountPath: /tests
+          name: tests
+          readOnly: true
+        - mountPath: /tools
+          name: tools
+  volumes:
+    - name: tests
+      configMap:
+        name: {{ template "consul.namePrefix" . }}-tests
+    - name: tools
+      emptyDir: {}
+  restartPolicy: Never
diff --git a/charts/consul/templates/ui-service.yaml b/charts/consul/templates/ui-service.yaml
new file mode 100644
index 0000000..26da9f6
--- /dev/null
+++ b/charts/consul/templates/ui-service.yaml
@@ -0,0 +1,23 @@
+# Service fronting the Consul UI. It selects the server pods and exposes
+# the HTTP API/UI port (8500) on port 80. Set ui.serviceType to expose the
+# UI outside the cluster (e.g. LoadBalancer or NodePort).
+{{- if and .Values.server.enabled .Values.ui.enabled .Values.ui.service }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: consul-ui
+  labels:
+    name: consul-ui
+spec:
+  selector:
+    app: consul-server
+  ports:
+    - name: http
+      port: 80
+      targetPort: 8500
+  {{- if .Values.ui.serviceType }}
+  type: {{ .Values.ui.serviceType }}
+  {{- end }}
+{{- end }}
diff --git a/charts/consul/test/_helpers.bash b/charts/consul/test/_helpers.bash
new file mode 100644
index 0000000..521a10a
--- /dev/null
+++ b/charts/consul/test/_helpers.bash
@@ -0,0 +1,45 @@
+# name_prefix returns the prefix of the resources within Kubernetes.
+name_prefix() {
+  printf "consul"
+}
+
+# helm_install installs the Consul chart.
+helm_install() {
+  helm install --name consul --wait ${BATS_TEST_DIRNAME}/..
+}
+
+# helm_delete deletes the Consul chart and all resources.
+helm_delete() {
+  helm delete --purge consul
+  kubectl delete --all pvc
+}
+
+# wait_for_ready waits for a pod to be ready.
+wait_for_ready() {
+  POD_NAME=$1
+
+  check() {
+    # This requests the pod and checks whether the status is running
+    # and the ready state is true. If so, it outputs the name. Otherwise
+    # it outputs empty. Therefore, to check for success, check for a
+    # nonzero string length.
+    kubectl get pods $1 -o json | \
+      jq -r 'select(
+        .status.phase == "Running" and
+        ([ .status.conditions[] | select(.type == "Ready" and .status == "True") ] | length) == 1
+      ) | .metadata.namespace + "/" + .metadata.name'
+  }
+
+  for i in $(seq 30); do
+    if [ -n "$(check ${POD_NAME})" ]; then
+      echo "${POD_NAME} is ready."
+      return
+    fi
+
+    echo "Waiting for ${POD_NAME} to be ready..."
+    sleep 2
+  done
+
+  echo "${POD_NAME} never became ready."
+  exit 1
+}
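These Bats helpers drive the first testing form described in the README; the second form exercises the test-runner Pod above through Helm's test hook. Against a release installed as `consul` (Helm 2 syntax; assumes `test.image`/`test.imageTag` point at an image that provides `kubectl`):

```
helm test consul --cleanup
```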
diff --git a/charts/consul/test/server.bats b/charts/consul/test/server.bats
new file mode 100644
index 0000000..dc358f6
--- /dev/null
+++ b/charts/consul/test/server.bats
@@ -0,0 +1,17 @@
+#!/usr/bin/env bats
+
+load _helpers
+
+@test "server: default, comes up healthy" {
+  helm_install
+  wait_for_ready $(name_prefix)-server-0
+
+  # Verify there are three servers.
+  local server_count=$(kubectl exec "$(name_prefix)-server-0" consul members |
+      grep server |
+      wc -l)
+  [ "${server_count}" -eq "3" ]
+
+  # Clean up.
+  helm_delete
+}
diff --git a/terraform/README.md b/terraform/README.md
new file mode 100644
index 0000000..8e72a9f
--- /dev/null
+++ b/terraform/README.md
@@ -0,0 +1,23 @@
+# Terraform
+
+This folder contains a Terraform configuration that can be used to set up
+an example cluster. These are not meant to be production-ready modules for
+using Consul with Kubernetes.
+
+The prerequisites for Terraform are:
+
+ * Google Cloud authentication. See [Google Application Default Credentials](https://cloud.google.com/docs/authentication/production). You may also reuse your `gcloud` credentials by exposing them as application defaults by running `gcloud auth application-default login`.
+ * `gcloud` installed and configured locally with GKE components.
+ * The following programs available on the PATH: `kubectl`, `helm`, `grep`, `xargs`.
+
+With that available, run the following:
+
+```
+$ terraform init
+$ terraform apply
+```
+
+The apply will ask you for the name of the project in which to set up the
+cluster. After this, everything will be set up, your local `kubectl`
+credentials will be configured, and you may use `helm` directly.
+
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000..f68f9ab
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,9 @@
+module "gke" {
+  source  = "./modules/gke"
+  project = "${var.project}"
+}
+
+module "helm" {
+  source  = "./modules/helm"
+  trigger = "${module.gke.cluster_id}"
+}
diff --git a/terraform/modules/README.md b/terraform/modules/README.md
new file mode 100644
index 0000000..adcc229
--- /dev/null
+++ b/terraform/modules/README.md
@@ -0,0 +1,9 @@
+# Terraform Modules
+
+This directory contains reusable [Terraform](https://www.terraform.io) modules
+for various tasks related to Consul and Kubernetes, from spinning up a demo
+cluster to running tests.
+
+These modules are used by our own automated systems for verifying the
+functionality of the Consul and Kubernetes components. These modules aren't
+meant to be production-ready deployment modules.
diff --git a/terraform/modules/gke/README.md b/terraform/modules/gke/README.md
new file mode 100644
index 0000000..d317e64
--- /dev/null
+++ b/terraform/modules/gke/README.md
@@ -0,0 +1,22 @@
+# GKE Cluster Setup
+
+This module creates a GKE cluster for running and testing the Consul and
+Kubernetes integrations. The GKE cluster is an opinionated setup, and this
+module is not meant to be a generic GKE module. This module also configures
+`kubectl` credentials.
+
+After this module completes, a GKE cluster is created and `kubectl` is
+configured such that you can immediately verify the Kubernetes cluster:
+
+    kubectl get componentstatus
+
+**WARNING:** This module will create resources that cost money. It does
+not use free-tier resources.
+
+## Requirements
+
+ * Google Cloud authentication. See [Google Application Default Credentials](https://cloud.google.com/docs/authentication/production). You may also reuse your `gcloud` credentials by exposing them as application defaults by running `gcloud auth application-default login`.
+ * `gcloud` installed and configured locally with GKE components and available on the PATH.
+ * `kubectl` installed locally and available on the PATH.
+ * A Google Cloud Project with GKE and billing activated.
+ * A Unix-like environment that supports piping, `grep`, and `xargs`.
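As with the top-level configuration, this module is driven by plain `terraform init`/`apply`. A sketch of a full create-and-destroy cycle, passing a hypothetical project name up front to avoid the interactive prompt:

```
terraform init
terraform apply -var project=my-gcp-project
# ... run the chart's Bats tests against the new cluster ...
terraform destroy -var project=my-gcp-project
```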
diff --git a/terraform/modules/gke/main.tf b/terraform/modules/gke/main.tf
new file mode 100644
index 0000000..3767f4e
--- /dev/null
+++ b/terraform/modules/gke/main.tf
@@ -0,0 +1,39 @@
+provider "google" {
+  project = "${var.project}"
+}
+
+resource "random_id" "suffix" {
+  byte_length = 4
+}
+
+resource "google_container_cluster" "cluster" {
+  name               = "consul-k8s-${random_id.suffix.dec}"
+  project            = "${var.project}"
+  enable_legacy_abac = true
+  initial_node_count = 5
+  zone               = "${var.zone}"
+  min_master_version = "${var.k8s_version}"
+  node_version       = "${var.k8s_version}"
+}
+
+resource "null_resource" "kubectl" {
+  triggers {
+    cluster = "${google_container_cluster.cluster.id}"
+  }
+
+  # On creation, we want to set up the kubectl credentials. The easiest way
+  # to do this is to shell out to gcloud.
+  provisioner "local-exec" {
+    command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}"
+  }
+
+  # On destroy, we want to try to clean up the kubectl credentials. This
+  # might fail if the credentials are already cleaned up, so we continue
+  # on failure. Generally this works just fine, since it only operates
+  # on local data.
+  provisioner "local-exec" {
+    when       = "destroy"
+    on_failure = "continue"
+    command    = "kubectl config get-clusters | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-cluster"
+  }
+}
diff --git a/terraform/modules/gke/outputs.tf b/terraform/modules/gke/outputs.tf
new file mode 100644
index 0000000..67e0f1a
--- /dev/null
+++ b/terraform/modules/gke/outputs.tf
@@ -0,0 +1,4 @@
+output "cluster_id" {
+  value      = "${google_container_cluster.cluster.id}"
+  depends_on = ["null_resource.kubectl"]
+}
diff --git a/terraform/modules/gke/variables.tf b/terraform/modules/gke/variables.tf
new file mode 100644
index 0000000..d4f6b83
--- /dev/null
+++ b/terraform/modules/gke/variables.tf
@@ -0,0 +1,16 @@
+variable "k8s_version" {
+  default     = "1.10.5-gke.4"
+  description = "The K8S version to use for both master and nodes."
+}
+
+variable "project" {
+  description = <