Move all terraform setup to test/terraform

Mitchell Hashimoto 2018-08-20 16:15:47 -07:00
parent 90ce7e614c
commit 36e8afc7a0
No known key found for this signature in database
GPG key ID: 744E147AA52F5B0A
17 changed files with 55 additions and 293 deletions

.gitignore (vendored): 1 change

@@ -1,5 +1,6 @@
.DS_Store
.terraform/
.terraform.tfstate*
terraform.tfstate*
terraform.tfvars
values.dev.yaml

@@ -45,7 +45,7 @@ be properly cleaned up. We recommend recycling the Kubernetes cluster to
start from a clean slate.
**Note:** There is a Terraform configuration in the
[terraform/ directory](https://github.com/hashicorp/consul-k8s/tree/master/terraform)
[test/terraform/ directory](https://github.com/hashicorp/consul-helm/tree/master/test/terraform)
that can be used to quickly bring up a GKE cluster and configure
`kubectl` and `helm` locally. This can be used to quickly spin up a test
cluster.

@@ -1,23 +0,0 @@
# Terraform

This folder contains a Terraform configuration that can be used to set up
an example cluster. These are not meant to be production-ready modules for
using Consul with Kubernetes.

The prerequisites for Terraform are:

* Google Cloud authentication. See [Google Application Default Credentials](https://cloud.google.com/docs/authentication/production). You may also reuse your `gcloud` credentials by exposing them as application defaults by running `gcloud auth application-default login`.
* `gcloud` installed and configured locally with GKE components.
* The following programs available on the PATH: `kubectl`, `helm`, `grep`, `xargs`.

With that available, run the following:

```
$ terraform init
$ terraform apply
```

The apply will ask you for the name of the Google Cloud project in which to
set up the cluster. After this, everything will be set up, your local
`kubectl` credentials will be configured, and you may use `helm` directly.
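For reference, the `project` can also be supplied up front in a `terraform.tfvars`
file (which the repository's `.gitignore` above already ignores), so that
`terraform apply` does not prompt for it. A minimal sketch, using a placeholder
project ID:

```
# terraform.tfvars -- "my-gcp-project" is a placeholder; use your own project ID.
project = "my-gcp-project"
```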

@@ -1,9 +0,0 @@
module "gke" {
  source  = "./modules/gke"
  project = "${var.project}"
}

module "helm" {
  source  = "./modules/helm"
  trigger = "${module.gke.cluster_id}"
}

@@ -1,9 +0,0 @@
# Terraform Modules

This directory contains reusable [Terraform](https://www.terraform.io) modules
for various tasks related to Consul and Kubernetes, from spinning up a demo
cluster to running tests.

These modules are used by our own automated systems for verifying the
functionality of the Consul and Kubernetes components. These modules aren't
meant to be production-ready deployment modules.

@@ -1,22 +0,0 @@
# GKE Cluster Setup

This module creates a GKE cluster for running and testing the Consul and
Kubernetes integrations. The GKE cluster is an opinionated setup and this
module is not meant to be a generic GKE module. This module also configures
`kubectl` credentials.

After this module completes, a GKE cluster is created and `kubectl` is
configured such that you can immediately verify the Kubernetes cluster:

    kubectl get componentstatus

**WARNING:** This module will create resources that cost money. This does
not use free tier resources.

## Requirements

* Google Cloud authentication. See [Google Application Default Credentials](https://cloud.google.com/docs/authentication/production). You may also reuse your `gcloud` credentials by exposing them as application defaults by running `gcloud auth application-default login`.
* `gcloud` installed and configured locally with GKE components and available on the PATH.
* `kubectl` installed locally and available on the PATH.
* A Google Cloud Project with GKE and billing activated.
* Unix-like environment that supports piping, `grep`, and `xargs`.
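As a rough sketch (mirroring the root `main.tf` shown above), the module can be
invoked with a placeholder project ID; `zone` and `k8s_version` are optional and
shown here with the module's own defaults:

```
module "gke" {
  source  = "./modules/gke"
  project = "my-gcp-project" # placeholder; any project with GKE and billing enabled

  # Optional overrides; these values match the defaults in variables.tf.
  zone        = "us-central1-a"
  k8s_version = "1.10.5-gke.4"
}
```

The module's `cluster_id` output can then be used to trigger dependent modules,
as the root `main.tf` does for the `helm` module.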

@@ -1,45 +0,0 @@
provider "google" {
  project = "${var.project}"
}

resource "random_id" "suffix" {
  byte_length = 4
}

resource "google_container_cluster" "cluster" {
  name               = "consul-k8s-${random_id.suffix.dec}"
  project            = "${var.project}"
  enable_legacy_abac = true
  initial_node_count = 5
  zone               = "${var.zone}"
  min_master_version = "${var.k8s_version}"
  node_version       = "${var.k8s_version}"
}

resource "null_resource" "kubectl" {
  triggers {
    cluster = "${google_container_cluster.cluster.id}"
  }

  # On creation, we want to setup the kubectl credentials. The easiest way
  # to do this is to shell out to gcloud.
  provisioner "local-exec" {
    command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}"
  }

  # On destroy we want to try to clean up the kubectl credentials. This
  # might fail if the credentials are already cleaned up or something so we
  # want this to continue on failure. Generally, this works just fine since
  # it only operates on local data.
  provisioner "local-exec" {
    when       = "destroy"
    on_failure = "continue"
    command    = "kubectl config get-clusters | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-cluster"
  }

  provisioner "local-exec" {
    when       = "destroy"
    on_failure = "continue"
    command    = "kubectl config get-contexts | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-context"
  }
}

@@ -1,4 +0,0 @@
output "cluster_id" {
  value      = "${google_container_cluster.cluster.id}"
  depends_on = ["null_resource.kubectl"]
}

@@ -1,16 +0,0 @@
variable "k8s_version" {
  default     = "1.10.5-gke.4"
  description = "The K8S version to use for both master and nodes."
}

variable "project" {
  description = <<EOF
Google Cloud Project to launch resources in. This project must have GKE
enabled and billing activated.
EOF
}

variable "zone" {
  default     = "us-central1-a"
  description = "The zone to launch all the GKE nodes in."
}

@@ -1,16 +0,0 @@
locals {
  service_account_path = "${path.module}/service-account.yaml"
}

resource "null_resource" "service_account" {
  triggers {
    cluster_id = "${var.trigger}"
  }

  provisioner "local-exec" {
    command = <<EOF
kubectl apply -f '${local.service_account_path}'
helm init --service-account helm
EOF
  }
}

@@ -1,3 +0,0 @@
variable "trigger" {
  description = "When this string changes, Helm is reinstalled. This should be set to something unique to the cluster installation."
}

@@ -1,6 +0,0 @@
variable "project" {
  description = <<EOF
Google Cloud Project to launch resources in. This project must have GKE
enabled and billing activated.
EOF
}

@@ -1,88 +0,0 @@
#!/bin/bash

# NOTE: err_usage, build_consul_local, and SOURCE_DIR are not defined in this
# file; they are presumably provided by a shared helper library sourced
# elsewhere (not shown in this diff).

SCRIPT_NAME="$(basename "${BASH_SOURCE[0]}")"
pushd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null
SCRIPT_DIR=$(pwd)
popd > /dev/null

function usage {
  cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]

Description:
   This script will build the consul-k8s binary on the local system.
   All the requisite tooling must be installed for this to be
   successful.

Options:
   -s | --source DIR    Path to source to build.
                        Defaults to "${SOURCE_DIR}"
   -o | --os OSES       Space separated string of OS
                        platforms to build.
   -a | --arch ARCH     Space separated string of
                        architectures to build.
   -h | --help          Print this help text.
EOF
}

function main {
  declare sdir="${SOURCE_DIR}"
  declare build_os=""
  declare build_arch=""

  while test $# -gt 0
  do
    case "$1" in
      -h | --help )
        usage
        return 0
        ;;
      -s | --source )
        if test -z "$2"
        then
          err_usage "ERROR: option -s/--source requires an argument"
          return 1
        fi

        if ! test -d "$2"
        then
          err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
          return 1
        fi

        sdir="$2"
        shift 2
        ;;
      -o | --os )
        if test -z "$2"
        then
          err_usage "ERROR: option -o/--os requires an argument"
          return 1
        fi

        build_os="$2"
        shift 2
        ;;
      -a | --arch )
        if test -z "$2"
        then
          err_usage "ERROR: option -a/--arch requires an argument"
          return 1
        fi

        build_arch="$2"
        shift 2
        ;;
      * )
        err_usage "ERROR: Unknown argument: '$1'"
        return 1
        ;;
    esac
  done

  build_consul_local "${sdir}" "${build_os}" "${build_arch}" || return 1

  return 0
}

main "$@"
exit $?

@@ -1,51 +0,0 @@
#!/bin/bash

# NOTE: err_usage, build_consul_local, and SOURCE_DIR are not defined in this
# file; they are presumably provided by a shared helper library sourced
# elsewhere (not shown in this diff).

SCRIPT_NAME="$(basename "${BASH_SOURCE[0]}")"
pushd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null
SCRIPT_DIR=$(pwd)
popd > /dev/null

function usage {
  cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]

Description:
   This script will install the necessary components for a Docker-based
   test. It builds the consul-k8s binary on the local system. All the
   requisite tooling must be installed for this to be successful.

Options:
   -s | --source DIR    Path to source to build.
                        Defaults to "${SOURCE_DIR}"
   -o | --os OSES       Space separated string of OS
                        platforms to build.
   -a | --arch ARCH     Space separated string of
                        architectures to build.
   -h | --help          Print this help text.
EOF
}

function main {
  declare sdir="${SOURCE_DIR}"
  declare build_os=""
  declare build_arch=""

  while test $# -gt 0
  do
    case "$1" in
      -h | --help )
        usage
        return 0
        ;;
      * )
        err_usage "ERROR: Unknown argument: '$1'"
        return 1
        ;;
    esac
  done

  build_consul_local "${sdir}" "${build_os}" "${build_arch}" || return 1

  return 0
}

main "$@"
exit $?

@@ -1,3 +1,7 @@
locals {
  service_account_path = "${path.module}/service-account.yaml"
}

provider "google" {
  project = "${var.project}"
}
@@ -16,3 +20,47 @@ resource "google_container_cluster" "cluster" {
  node_version       = "${var.k8s_version}"
}

resource "null_resource" "kubectl" {
  count = "${var.init_cli ? 1 : 0}"

  triggers {
    cluster = "${google_container_cluster.cluster.id}"
  }

  # On creation, we want to setup the kubectl credentials. The easiest way
  # to do this is to shell out to gcloud.
  provisioner "local-exec" {
    command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}"
  }

  # On destroy we want to try to clean up the kubectl credentials. This
  # might fail if the credentials are already cleaned up or something so we
  # want this to continue on failure. Generally, this works just fine since
  # it only operates on local data.
  provisioner "local-exec" {
    when       = "destroy"
    on_failure = "continue"
    command    = "kubectl config get-clusters | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-cluster"
  }

  provisioner "local-exec" {
    when       = "destroy"
    on_failure = "continue"
    command    = "kubectl config get-contexts | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-context"
  }
}

resource "null_resource" "helm" {
  count = "${var.init_cli ? 1 : 0}"

  triggers {
    cluster = "${google_container_cluster.cluster.id}"
  }

  provisioner "local-exec" {
    command = <<EOF
kubectl apply -f '${local.service_account_path}'
helm init --service-account helm
EOF
  }
}

@@ -15,3 +15,8 @@ variable "zone" {
  default     = "us-central1-a"
  description = "The zone to launch all the GKE nodes in."
}

variable "init_cli" {
  default     = false
  description = "Whether to init the CLI tools kubectl, helm, etc. or not."
}
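
As with the previous configuration, these variables can be supplied through a
`terraform.tfvars` file in `test/terraform`; a minimal sketch with a placeholder
project ID that also opts in to the CLI initialization added in this commit:

```
# terraform.tfvars -- "my-gcp-project" is a placeholder project ID.
project  = "my-gcp-project"
init_cli = true
```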