Set up the PR phase in GitHub and work toward getting blue-green deployment running in AWS.

* Added PR and deploy GitHub Actions files for the first steps in the
pipeline.

* Building out the blue-green deployment; the Dockerfile needs to be in
main for CodeBuild.
Joe Bailey 2023-02-12 13:05:36 -08:00 committed by GitHub
parent ca055dbbe1
commit 517d5e0979
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
23 changed files with 1908 additions and 31 deletions

.editorconfig

@@ -6,6 +6,7 @@ charset = utf-8
end_of_line = lf
insert_final_newline = true
indent_style = space
indent_size = 2
[*.{java,xml}]
indent_size = 4

65 .github/workflows/deploy.yml vendored Normal file

@@ -0,0 +1,65 @@
name: Deploy
# on:
# push:
# branches:
# - main
on:
pull_request:
branches: [main]
jobs:
deploy-infra:
name: Deploy Infrastructure
# deploy-blue:
# name: Deploy
# runs-on: ubuntu-latest
# environment: production
# steps:
# - name: Checkout
# uses: actions/checkout@v3
# - name: Configure AWS credentials
# uses: aws-actions/configure-aws-credentials@13d241b293754004c80624b5567555c4a39ffbe3
# with:
# aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
# aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# aws-region: ${{ env.AWS_REGION }}
# - name: Login to Amazon ECR
# id: login-ecr
# uses: aws-actions/amazon-ecr-login@aaf69d68aa3fb14c1d5a6be9ac61fe15b48453a2
# - name: Build, tag, and push image to Amazon ECR
# id: build-image
# env:
# ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
# IMAGE_TAG: ${{ github.sha }}
# run: |
# # Build a docker container and
# # push it to ECR so that it can
# # be deployed to ECS.
# # docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
# ./mvnw spring-boot:build-image -Dspring-boot.build-image.imageName=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
# docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
# echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> $GITHUB_OUTPUT
# - name: Fill in the new image ID in the Amazon ECS task definition
# id: task-def
# uses: aws-actions/amazon-ecs-render-task-definition@97587c9d45a4930bf0e3da8dd2feb2a463cf4a3a
# with:
# task-definition: ${{ env.ECS_TASK_DEFINITION }}
# container-name: ${{ env.CONTAINER_NAME }}
# image: ${{ steps.build-image.outputs.image }}
# - name: Deploy Amazon ECS task definition
# uses: aws-actions/amazon-ecs-deploy-task-definition@de0132cf8cdedb79975c6d42b77eb7ea193cf28e
# with:
# task-definition: ${{ steps.task-def.outputs.task-definition }}
# service: ${{ env.ECS_SERVICE }}
# cluster: ${{ env.ECS_CLUSTER }}
# wait-for-service-stability: true
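# NOTE: these commented steps reference ${{ env.AWS_REGION }}, $ECR_REPOSITORY,
# ${{ env.ECS_TASK_DEFINITION }}, ${{ env.CONTAINER_NAME }}, ${{ env.ECS_SERVICE }},
# and ${{ env.ECS_CLUSTER }}; each must resolve to a value (for example via a
# workflow-level env: block) before the steps are uncommented.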


@@ -1,29 +0,0 @@
# This workflow will build a Java project with Maven, and cache/restore any dependencies to improve the workflow execution time
# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven
name: Java CI with Maven
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
java: [ '17' ]
steps:
- uses: actions/checkout@v2
- name: Set up JDK ${{matrix.java}}
uses: actions/setup-java@v2
with:
java-version: ${{matrix.java}}
distribution: 'adopt'
cache: maven
- name: Build with Maven Wrapper
run: ./mvnw -B package

23 .github/workflows/pr.yml vendored Normal file

@@ -0,0 +1,23 @@
name: PR Verification
on:
pull_request:
branches: [main]
jobs:
build-test:
runs-on: ubuntu-latest
strategy:
matrix:
java: ["17"]
steps:
- uses: actions/checkout@v2
- name: Set up JDK ${{matrix.java}}
uses: actions/setup-java@v2
with:
java-version: ${{matrix.java}}
distribution: "adopt"
cache: maven
- name: Build with Maven Wrapper
run: ./mvnw -B package

3 .gitignore vendored

@@ -15,3 +15,6 @@ build/*
_site/
*.css
!petclinic.css
*_override.tf
tmp/

3 Dockerfile Normal file

@@ -0,0 +1,3 @@
# Spring Petclinic 3.x is built on Spring Boot 3, which requires Java 17, so a Java 8 base image cannot run this jar.
FROM eclipse-temurin:17-jre-alpine
COPY target/spring-petclinic-3.0.0-SNAPSHOT.jar /spring-petclinic-3.0.0-SNAPSHOT.jar
ENTRYPOINT ["java","-jar","/spring-petclinic-3.0.0-SNAPSHOT.jar"]

202 infra/LICENSE Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Amazon Web Services
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

10 infra/NOTICE Normal file

@@ -0,0 +1,10 @@
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
ecs-refarch-continuous-deployment
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.

130 infra/README.md Normal file

@@ -0,0 +1,130 @@
# **NOTE:** The same reference architecture using [AWS Fargate](https://aws.amazon.com/fargate/) is available in the [fargate branch](https://github.com/awslabs/ecs-blue-green-deployment/tree/fargate)
# Blue/Green deployments on ECS
This reference architecture accompanies the blog post on [blue/green deployments on ECS](https://aws.amazon.com/blogs/compute/bluegreen-deployments-with-amazon-ecs/). It creates a continuous delivery pipeline by leveraging AWS CloudFormation templates. The templates create resources using Amazon's Code* services to build and deploy containers onto an ECS cluster as long-running services. It also includes a manual approval step, facilitated by a Lambda function, that discovers and swaps target group rules between two target groups, promoting the green version to production and demoting the blue version to staging.
## Pre-Requisites
This example uses the [AWS Command Line Interface](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) to run Step 3 below.
Please follow the [instructions](http://docs.aws.amazon.com/cli/latest/userguide/installing.html) if you haven't installed the AWS CLI. Your CLI [configuration](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) needs the PowerUserAccess and IAMFullAccess [IAM policies](http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) associated with your credentials.
```console
aws --version
```
The output must show **AWS CLI version >= 1.11.37**
## Quick setup in three steps
#### 1. Fork ECS Sample app
[Fork](https://help.github.com/articles/fork-a-repo/) the [Amazon ECS sample app](https://github.com/awslabs/ecs-demo-php-simple-app) GitHub repository into your GitHub account.
Clone the ECS Sample app repo
```console
git clone https://github.com/<your_github_username>/ecs-demo-php-simple-app
```
#### 2. Clone ECS blue green repo
```console
git clone https://github.com/awslabs/ecs-blue-green-deployment
```
#### 2a. (Optional) Switch to the [fargate branch](https://github.com/awslabs/ecs-blue-green-deployment/tree/fargate) if you want to use [AWS Fargate](https://aws.amazon.com/fargate/)
```console
git checkout fargate
```
#### 3. Run bin/deploy
```console
bin/deploy
```
Here are the inputs required to launch the CloudFormation templates:
* **S3 Bucket**: an S3 bucket for storing your CloudFormation templates and scripts. This bucket must be in the same region where you wish to launch all the AWS resources created by this example.
* **CloudFormation Stack Name**: the CloudFormation stack name to create
* **GitHubUser**: your GitHub username
* **GitHubToken**: your GitHub token for authentication ([https://github.com/settings/tokens](https://github.com/settings/tokens))
Sit back and relax until all the resources are created for you. After the templates are created, you can open the ELB DNS URL to see the ECS sample app.
To test a blue/green deployment, make a change in the ECS sample app. For example, edit src/index.php and update the background-color to #20E941 for a green background. After committing to your repo, CodePipeline will pick up the change automatically and go through the process of updating your application.
Click the "Review" button in the CodePipeline management console and approve the change. You should now see the new version of the application with a green background.
## Resources created in this exercise
Count | AWS resources
| --- | --- |
7 | [AWS CloudFormation templates](https://aws.amazon.com/cloudformation/)
1 | [Amazon VPC](https://aws.amazon.com/vpc/) (10.215.0.0/16)
1 | [AWS CodePipeline](https://aws.amazon.com/codepipeline/)
2 | [AWS CodeBuild projects](https://aws.amazon.com/codebuild/)
1 | [Amazon S3 Bucket](https://aws.amazon.com/s3/)
1 | [AWS Lambda](https://aws.amazon.com/lambda/)
1 | [Amazon ECS Cluster](https://aws.amazon.com/ecs/)
2 | [Amazon ECS Service](https://aws.amazon.com/ecs/)
1 | [Application Load Balancer](https://aws.amazon.com/elasticloadbalancing/applicationloadbalancer/)
2 | [Application Load Balancer Target Groups](https://aws.amazon.com/elasticloadbalancing/applicationloadbalancer/)
## Implementation details
During the first phase, the parent template (ecs-blue-green-deployment.yaml) kicks off creating the VPC and the resources in the deployment-pipeline template.
This creates the CodePipeline, CodeBuild, and Lambda resources. Once this is complete, the second phase creates the rest of the resources, such as the ALB,
target groups, and ECS resources. Below is a screenshot of CodePipeline once all CloudFormation templates have completed
![codepipeline](images/codepipeline1.png)
The templates create two services on the ECS cluster and associate a target group with each service, as depicted in the diagram.
The blue target group is associated with port 80, which carries live/production traffic, and the green target group is associated with port 8080, which serves the new version of the application.
The new version of the application can be tested by accessing the load balancer on port 8080, for example http://LOAD_BALANCER_URL:8080. If you want to restrict which traffic ranges can access the beta version of the code, you may modify the ingress rules [here](https://github.com/awslabs/ecs-blue-green-deployment/blob/master/templates/load-balancer.yaml#L30).
During the initial rollout, both the blue and green services serve the same application version. As you introduce a new release, CodePipeline picks up the changes, pushes them down the pipeline using CodeBuild, and deploys them to the green service. To switch from the green to the blue service (from beta to production), you **Approve** the release by going to the CodePipeline management console and clicking the **Review** button. Approving the change triggers the Lambda function (blue_green_flip.py), which swaps the ALB target groups. If you discover bugs while in production, you can revert to the previous application version by approving the change again; this puts the blue service back into production. To simplify identifying which target group is serving live traffic, we have added tags on the ALB target groups: the **IsProduction** tag will be **true** for the production application.
![bluegreen](images/ecs-bluegreen.png)
Here is a further explanation of each stage of the pipeline.
**During Build stage**
* In the first phase, CodeBuild builds the Docker container image and pushes it to [Amazon ECR](https://aws.amazon.com/ecr/).
* In the second phase, CodeBuild executes scripts/deployer.py, which performs the following steps:
1. Retrieve the artifact (build.json) from the previous phase (the CodeBuild phase that builds the application container images)
2. Check whether the load balancer exists. The name of the ELB is fed in through an environment variable by the pipeline.
3. Get the value of the "Identifier" tag on the target groups running on ports 8080 and 80. It will be either "Code1" or "Code2"
4. Get the SHA of the image running on the target groups at ports 8080 and 80
5. Edit the build.json retrieved in step 1 and append the values retrieved in steps 3 and 4
6. Save the merged values as cf_inputs.json. This file is the output of the CodeBuild project and is fed as an input to the CloudFormation
execution stage. The JSON file has the following schema:
```json
{
  "Code1": "CONTAINER_TAG1",
  "Code2": "CONTAINER_TAG2"
}
```
If the load balancer does not exist (as determined in step 2), the stack is being executed for the first time, and the values of "CONTAINER_TAG1" and "CONTAINER_TAG2" will be the same, defaulting to the
value retrieved from build.json in step 1.
**During Deploy stage**
CodePipeline executes templates/ecs-cluster.yaml. The CloudFormation input parameters "Code1" and "Code2" are overridden with the values written to cf_inputs.json in the second (Discover) phase of the Build stage.
**During Review stage**
The pipeline offers a manual "Review" button so that the approver can review the code and approve the new release.
Approving at this stage triggers the Lambda function (blue_green_flip.py), which swaps the green target group into live traffic. You can check out the sample app to see the new release. blue_green_flip.py performs the following steps:
1. Read the job data from the input JSON
2. Read the job ID from the input JSON
3. Get the parameters from the input JSON
4. Get the load balancer name from the parameters
5. Identify the target groups running on this load balancer at ports 80 and 8080, perform the target group swap, and also swap the values of the "IsProduction" tags
6. Send success or failure to CodePipeline
## Cleanup
First delete the ecs-cluster CloudFormation stack; this will delete both ECS service stacks (BlueService and GreenService) and the LoadBalancer stack. Next, delete the parent stack. This should delete all the resources that were created for this exercise.
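As a rough boto3 sketch of that teardown order (the stack names below are placeholders; the pipeline creates the cluster stack as `<stack-name>-ecs-cluster`):
```python
import boto3

cf = boto3.client("cloudformation")

# Hypothetical stack names; substitute the names from your deployment.
cluster_stack = "mystack-ecs-cluster"
parent_stack = "mystack"

# Delete the ECS cluster stack first (removes the Blue/Green services and the
# load balancer), then delete the parent stack once the first delete finishes.
cf.delete_stack(StackName=cluster_stack)
cf.get_waiter("stack_delete_complete").wait(StackName=cluster_stack)
cf.delete_stack(StackName=parent_stack)
cf.get_waiter("stack_delete_complete").wait(StackName=parent_stack)

# Note: resources created with DeletionPolicy: Retain (the ECR repository and
# the artifact S3 bucket) survive stack deletion and must be removed manually.
```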

23 infra/bin/deploy Executable file

@@ -0,0 +1,23 @@
#!/bin/bash
set -o errexit -o xtrace
echo -n "Enter S3 Bucket to host the templates and scripts > "
read bucket
echo -n "Enter stackname to create or update the stack > "
read stackname
echo -n "Enter GitHub User > "
read GitHubUser
echo -n "Enter GitHubToken > "
read GitHubToken
zip deploy/templates.zip ecs-blue-green-deployment.yaml templates/*
cd scripts && zip scripts.zip * && cd ..
mv scripts/scripts.zip deploy/scripts.zip
aws s3 cp deploy/templates.zip "s3://${bucket}" --acl public-read
aws s3 cp deploy/scripts.zip "s3://${bucket}" --acl public-read
aws s3 cp ecs-blue-green-deployment.yaml "s3://${bucket}" --acl public-read
aws s3 cp --recursive templates/ "s3://${bucket}/templates" --acl public-read
aws s3 cp --recursive scripts/ "s3://${bucket}/scripts" --acl public-read
aws s3api put-bucket-versioning --bucket "${bucket}" --versioning-configuration Status=Enabled
aws cloudformation deploy --stack-name "$stackname" --template-file ecs-blue-green-deployment.yaml --capabilities CAPABILITY_NAMED_IAM --parameter-overrides "GitHubUser=$GitHubUser" "GitHubToken=$GitHubToken" "TemplateBucket=$bucket"

2 infra/deploy/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
*
!.gitignore

80 infra/ecs-blue-green-deployment.yaml Normal file

@@ -0,0 +1,80 @@
Description: >
This template illustrates reference architecture for Blue/Green Deployment on ECS. For further reference, please review BluegreenRepo ==> https://github.com/awslabs/ecs-blue-green-deployment
Parameters:
GitHubUser:
Type: String
Description: Your username on GitHub.
GitHubRepo:
Type: String
Default: spring-petclinic
Description: The repo name of the sample service.
AllowedPattern: "[A-Za-z0-9_.-]*"
MaxLength: 50
GitHubBranch:
Type: String
Default: main
Description: The branch of the repo to continuously deploy.
GitHubToken:
Type: String
Description: >
Token for the user specified above. (https://github.com/settings/tokens)
TemplateBucket:
Type: String
Description: >
S3 Bucket used for nested templates
Metadata:
AWS::CloudFormation::Interface:
ParameterLabels:
GitHubUser:
default: "User"
GitHubRepo:
default: "Repo"
GitHubBranch:
default: "Branch"
GitHubToken:
default: "Personal Access Token"
ParameterGroups:
- Label:
default: GitHub Configuration
Parameters:
- GitHubRepo
- GitHubBranch
- GitHubUser
- GitHubToken
Resources:
DeploymentPipeline:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: !Sub https://s3.amazonaws.com/${TemplateBucket}/templates/deployment-pipeline.yaml
Parameters:
GitHubUser: !Ref GitHubUser
GitHubToken: !Ref GitHubToken
GitHubRepo: !Ref GitHubRepo
GitHubBranch: !Ref GitHubBranch
TemplateBucket: !Ref TemplateBucket
Subnet1: !GetAtt VPC.Outputs.Subnet1
Subnet2: !GetAtt VPC.Outputs.Subnet2
VpcId: !GetAtt VPC.Outputs.VpcId
VpcCIDR: 10.215.0.0/16
VPC:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: !Sub https://s3.amazonaws.com/${TemplateBucket}/templates/vpc.yaml
Parameters:
Name: !Ref AWS::StackName
VpcCIDR: 10.215.0.0/16
Subnet1CIDR: 10.215.10.0/24
Subnet2CIDR: 10.215.20.0/24
Outputs:
PipelineUrl:
Description: The continuous deployment pipeline in the AWS Management Console.
Value: !GetAtt DeploymentPipeline.Outputs.PipelineUrl

3 binary image files added (not shown): 3.8 KiB, 134 KiB, 127 KiB.

228 infra/scripts/blue_green_flip.py Normal file

@@ -0,0 +1,228 @@
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import boto3
import sys
import traceback
print('Loading function')
code_pipeline = boto3.client('codepipeline')
elbclient = boto3.client('elbv2')
def put_job_success(job, message):
"""Notify CodePipeline of a successful job
Args:
job: The CodePipeline job ID
message: A message to be logged relating to the job status
Raises:
Exception: Any exception thrown by .put_job_success_result()
"""
print('Putting job success')
print(message)
code_pipeline.put_job_success_result(jobId=job)
def put_job_failure(job, message):
"""Notify CodePipeline of a failed job
Args:
job: The CodePipeline job ID
message: A message to be logged relating to the job status
Raises:
Exception: Any exception thrown by .put_job_failure_result()
"""
print('Putting job failure')
print(message)
code_pipeline.put_job_failure_result(jobId=job, failureDetails={'message': message, 'type': 'JobFailed'})
def continue_job_later(job, message):
"""Notify CodePipeline of a continuing job
This will cause CodePipeline to invoke the function again with the
supplied continuation token.
Args:
job: The CodePipeline job ID
message: A message to be logged relating to the job status
Raises:
Exception: Any exception thrown by .put_job_success_result()
"""
# Use the continuation token to keep track of any job execution state
# This data will be available when a new job is scheduled to continue the current execution
continuation_token = json.dumps({'previous_job_id': job})
print('Putting job continuation')
print(message)
code_pipeline.put_job_success_result(jobId=job, continuationToken=continuation_token)
def get_user_params(job_id,job_data):
"""Gets user parameter object sent from CodePipeline
Args:
job_id: The CodePipeline job ID
job_data: json job data sent from codepipeline
Raises:
Exception: Any exception caught in decoding the params
"""
try:
user_parameters = job_data['actionConfiguration']['configuration']['UserParameters']
decoded_parameters = json.loads(user_parameters)
print(decoded_parameters)
except Exception as e:
put_job_failure(job_id,e)
raise Exception('UserParameters could not be decoded as JSON')
return decoded_parameters
def swaptargetgroups(elbname):
"""Discovers the live target group and non-production target group and swaps
Args:
elbname : name of the load balancer, which has the target groups to swap
Raises:
Exception: Any exception thrown by handler
"""
elbresponse = elbclient.describe_load_balancers(Names=[elbname])
listeners = elbclient.describe_listeners(LoadBalancerArn=elbresponse['LoadBalancers'][0]['LoadBalancerArn'])
for x in listeners['Listeners']:
if (x['Port'] == 443):
livelistenerarn = x['ListenerArn']
if (x['Port'] == 80):
livelistenerarn = x['ListenerArn']
if (x['Port'] == 8443):
betalistenerarn = x['ListenerArn']
if (x['Port'] == 8080):
betalistenerarn = x['ListenerArn']
livetgresponse = elbclient.describe_rules(ListenerArn=livelistenerarn)
for x in livetgresponse['Rules']:
if x['Priority'] == '1':
livetargetgroup = x['Actions'][0]['TargetGroupArn']
liverulearn = x['RuleArn']
betatgresponse = elbclient.describe_rules(ListenerArn=betalistenerarn)
for x in betatgresponse['Rules']:
if x['Priority'] == '1':
betatargetgroup = x['Actions'][0]['TargetGroupArn']
betarulearn = x['RuleArn']
print("Live=" + livetargetgroup)
print("Beta=" + betatargetgroup)
modifyOnBeta = elbclient.modify_rule(
RuleArn=betarulearn,
Actions=[
{
'Type': 'forward',
'TargetGroupArn': livetargetgroup
}
]
)
print(modifyOnBeta)
modifyOnLive = elbclient.modify_rule(
RuleArn=liverulearn,
Actions=[
{
'Type': 'forward',
'TargetGroupArn': betatargetgroup
}
]
)
print(modifyOnLive)
modify_tags(livetargetgroup,"IsProduction","False")
modify_tags(betatargetgroup, "IsProduction", "True")
def modify_tags(arn,tagkey,tagvalue):
"""Modifies the tags on the target groups as an identifier, after swap has been performed to indicate,
which target group is live and which target group is non-production
Args:
arn : AWS ARN of the Target Group
tagkey: Key of the Tag
tagvalue: Value of the Tag
Raises:
Exception: Any exception thrown by handler
"""
elbclient.add_tags(
ResourceArns=[arn],
Tags=[
{
'Key': tagkey,
'Value': tagvalue
},
]
)
def handler(event, context):
""" Main haldler as an entry point of the AWS Lambda function. Handler controls the sequence of methods to call
1. Read Job Data from input json
2. Read Job ID from input json
3. Get parameters from input json
4. Get Load balancer name from parameters
5. Perform the swap on target group
6. Send success or failure to codepipeline
Args:
event : http://docs.aws.amazon.com/codepipeline/latest/userguide/actions-invoke-lambda-function.html
#actions-invoke-lambda-function-json-event-example
context: not used but required for Lambda function
Raises:
Exception: Any exception thrown by handler
"""
try:
print(event)
job_id = event['CodePipeline.job']['id']
job_data = event['CodePipeline.job']['data']
params = get_user_params(job_id,job_data)
elb_name = params['ElbName']
print("ELBNAME="+elb_name)
swaptargetgroups(elb_name)
put_job_success(job_id,"Target Group Swapped.")
except Exception as e:
print('Function failed due to exception.')
print(e)
traceback.print_exc()
put_job_failure(job_id, 'Function exception: ' + str(e))
print('Function complete.')
return "Complete."
if __name__ == "__main__":
    # Local testing: pass the path of a JSON file containing a sample CodePipeline event.
    with open(sys.argv[1]) as f:
        handler(json.load(f), None)
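For local testing, a sample event shaped like CodePipeline's Invoke-action payload could be written to a file and passed as the first argument. This is a minimal sketch: the job ID is fake and `spring-petclinic` is an assumed load balancer name, and the handler will still call the real AWS APIs via boto3:
```python
import json

# Hypothetical sample event for exercising blue_green_flip.handler locally.
sample_event = {
    "CodePipeline.job": {
        "id": "00000000-0000-0000-0000-000000000000",  # fake job id
        "data": {
            "actionConfiguration": {
                "configuration": {
                    # Same shape the pipeline passes via UserParameters
                    "UserParameters": json.dumps({"ElbName": "spring-petclinic"})
                }
            }
        }
    }
}

with open("sample_event.json", "w") as f:
    json.dump(sample_event, f)
```
Then run `python blue_green_flip.py sample_event.json`.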

196 infra/scripts/deployer.py Normal file

@@ -0,0 +1,196 @@
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import os
from botocore.client import Config
import zipfile
import json
elb_name = os.environ.get('ELB_NAME')
elb_client = boto3.client('elbv2')
describe_elb_response = None
def handler():
""" Main handler as an entry point of code. Handler controls the sequence of methods to call.No inputs required.
As this runs in AWS CodeBuild, the script gets all the values from the environment variables in codebuild.
1. Retrieve artifact (build.json) from the previous stage (CodeBuild phase, which builds application container images)
2. Check if the load balancer exists. Name of the ELB is fed through environment variable by the pipeline.
3. Get tag key value of the target group, running on port 8080 and 80 with KeyName as "Identifier"
4. Get Sha of the image id running on target group at port 8080 and 80
5. Edit the build.json retrieved from step-1 and append the values retrieved in step3 and step4
6. Save the modified build.json. This file is the output from codebuild project and fed as an input to the CloudFormation
execution stage.
Args: None
Raises:
Exception: Any exception thrown by handler
"""
print(elb_name)
build_id = get_build_artifact_id(get_build_execution_id())
if check_elb_exists():
beta_identifier, beta_sha, live_identifier, live_sha = find_beta_targetgroup()
cf_inputs = { beta_identifier:str(build_id),live_identifier:live_sha }
else:
cf_inputs = {"Code1": str(build_id), "Code2": str(build_id)}
with open('cf_inputs.json', 'w+') as outfile:
json.dump(cf_inputs, outfile)
def check_elb_exists():
"""Checks if the Load Balancer Exists
Args: None
Raises: None, as we dont want to stop script execution. If the code reaches exception block, it means
that the load balancer does not exists.
Returns:
Boolean, True if Load Balancer Exists, False, if Load balancer does not exists
"""
global describe_elb_response
try:
describe_elb_response = elb_client.describe_load_balancers(Names=[elb_name])
return True
except Exception:
print("Load balancer does not exist")
return False
def find_beta_targetgroup():
""" Discovers the green side ( non production side) target group, which is running on port 8080.
Args: None
Returns:
beta_identifier : value of the "Identifier" tag on the target group running on port 8080
beta_sha: SHA of the image id running on the target group at port 8080
live_identifier : value of the "Identifier" tag on the target group running on port 80
live_sha: SHA of the image id running on the target group at port 80
Raises:
Exception: Any exception thrown by handler
"""
listeners = elb_client.describe_listeners(LoadBalancerArn=describe_elb_response['LoadBalancers'][0]['LoadBalancerArn'])
for x in listeners['Listeners']:
if (x['Port'] == 80):
livelistenerarn = x['ListenerArn']
if (x['Port'] == 8080):
betalistenerarn = x['ListenerArn']
beta_tg_response = elb_client.describe_rules(ListenerArn=betalistenerarn)
live_tg_response = elb_client.describe_rules(ListenerArn=livelistenerarn)
for x in beta_tg_response['Rules']:
if x['Priority'] == '1':
beta_target_group = x['Actions'][0]['TargetGroupArn']
betarulearn = x['RuleArn']
for x in live_tg_response['Rules']:
if x['Priority'] == '1':
live_target_group = x['Actions'][0]['TargetGroupArn']
liverulearn = x['RuleArn']
beta_identifier,beta_sha = find_beta_image_identifier(beta_target_group)
live_identifier, live_sha = find_beta_image_identifier(live_target_group)
return beta_identifier,beta_sha,live_identifier,live_sha
def find_beta_image_identifier(targetgrouparn):
"""Queries the tags on TargetGroups
Args:
targetgrouparn - Amazon ARN of the Target group that needs to be queried for the Tags
Returns:
identifier : value of the "Identifier" tag on the target group
sha: SHA of the image id running on the target group
Raises:
Exception: Any exception thrown by handler
"""
response = elb_client.describe_tags(ResourceArns=[targetgrouparn])
identifier = None
imagesha = None
for tags in response['TagDescriptions']:
for tag in tags['Tags']:
if tag['Key'] == "Identifier":
print("Image identifier string on " + targetgrouparn + " : " + tag['Value'])
identifier = tag['Value']
if tag['Key'] == "Image":
imagesha = tag['Value']
return identifier,imagesha
def get_build_artifact_id(build_id):
"""Get artifact (build.json) from the build project . We are making this as an additional call to get the build.json
which already contains the new built repository ECR path. We could have consolidated this script and executed in the build
phase, but as codebuild accepts the input from one source only (scripts and application code are in different sources), thats
why an additional call to retrieve build.json from a different build project.
Args:
build_id - Build ID for codebuild (build phase)
Returns:
build.json
Raises:
Exception: Any exception thrown by handler
"""
codebuild_client = boto3.client('codebuild')
response = codebuild_client.batch_get_builds(
ids=[
str(build_id),
]
)
for build in response['builds']:
s3_location = build['artifacts']['location']
bucketkey = s3_location.split(":")[5]
bucket = bucketkey.split("/")[0]
key = bucketkey[bucketkey.find("/") + 1:]
s3_client = boto3.client('s3', config=Config(signature_version='s3v4'))
s3_client.download_file(bucket, key, 'downloaded_object')
zip_ref = zipfile.ZipFile('downloaded_object', 'r')
zip_ref.extractall('downloaded_folder')
zip_ref.close()
with open('downloaded_folder/build.json') as data_file:
objbuild = json.load(data_file)
print(objbuild['tag'])
return objbuild['tag']
def get_build_execution_id():
"""Query Environment Variables to reteieve "CODEBUILD_INITIATOR", which gives codebuild id.
Use this ID to call codepipeline API to retrieve last successful build ID (build phase)
Args:
None
Returns:
build id
Raises:
Exception: Any exception thrown by handler
"""
codepipeline_client = boto3.client('codepipeline')
initiator = str(os.environ.get('CODEBUILD_INITIATOR')).split("/")[-1]
response = codepipeline_client.get_pipeline_state(
name=initiator
)
for stage in response['stageStates']:
if stage['stageName'] == 'Build':
for actionstate in stage['actionStates']:
if actionstate['actionName'] == 'Build':
return actionstate['latestExecution']['externalExecutionId']
if __name__ == '__main__':
handler()
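A hedged sketch for running this script outside CodeBuild; the load balancer and pipeline names below are placeholders, and both environment variables are normally injected by the pipeline:
```python
import os

# Hypothetical values; CodeBuild normally injects both variables.
os.environ["ELB_NAME"] = "spring-petclinic"
os.environ["CODEBUILD_INITIATOR"] = "codepipeline/my-pipeline"

# deployer.py reads ELB_NAME at module import time, so import it only
# after the environment is populated.
import deployer

deployer.handler()
```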

420 infra/templates/deployment-pipeline.yaml Normal file

@@ -0,0 +1,420 @@
Description: >
Create CodePipeline and CodeBuild resources for Blue/Green Deployment on ECS. For further reference, please review BluegreenRepo ==> https://github.com/awslabs/ecs-blue-green-deployment
Parameters:
GitHubRepo:
Type: String
GitHubBranch:
Type: String
GitHubToken:
Type: String
GitHubUser:
Type: String
TemplateBucket:
Type: String
Subnet1:
Type: AWS::EC2::Subnet::Id
Subnet2:
Type: AWS::EC2::Subnet::Id
VpcId:
Type: AWS::EC2::VPC::Id
VpcCIDR:
Type: String
Resources:
BlueGreenFlipLambda:
Type: AWS::Lambda::Function
Properties:
FunctionName: !Sub lambda-${GitHubRepo}-blue-green-swap
Handler: blue_green_flip.handler
Runtime: python3.7
Description: !Sub lambda-${GitHubRepo}-blue-green-swap
Code:
S3Bucket: !Ref TemplateBucket
S3Key: scripts.zip
MemorySize: 128
Timeout: 60
Role: !GetAtt BlueGreenFlipLambdaRole.Arn
BlueGreenFlipLambdaRole:
Type: AWS::IAM::Role
Properties:
#RoleName: !Sub lambda-${GitHubRepo}-blue-green-swap-role
AssumeRolePolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Principal:
Service:
- lambda.amazonaws.com
Action:
- sts:AssumeRole
Path: /
BlueGreenFlipLambdaPolicy:
Type: AWS::IAM::Policy
Properties:
PolicyName: !Sub lambda-${GitHubRepo}-blue-green-swap-policy
PolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Action:
- elasticloadbalancing:*
- codepipeline:PutJobFailureResult
- codepipeline:PutJobSuccessResult
Resource: "*"
- Effect: Allow
Action:
- logs:CreateLogGroup
- logs:CreateLogStream
- logs:PutLogEvents
Resource: arn:aws:logs:*:*:*
Roles:
- !Ref BlueGreenFlipLambdaRole
Repository:
Type: AWS::ECR::Repository
DeletionPolicy: Retain
CloudFormationExecutionRole:
Type: AWS::IAM::Role
DeletionPolicy: Retain
Properties:
#RoleName: !Sub cfn-${AWS::StackName}
Path: /
AssumeRolePolicyDocument: |
{
"Statement": [{
"Effect": "Allow",
"Principal": { "Service": [ "cloudformation.amazonaws.com" ]},
"Action": [ "sts:AssumeRole" ]
}]
}
Policies:
- PolicyName: root
PolicyDocument:
Version: 2012-10-17
Statement:
- Resource: "*"
Effect: Allow
Action:
- ecs:*
- ecr:*
- iam:*
- ec2:*
- elasticloadbalancing:*
- autoscaling:*
CodeBuildServiceRole:
Type: AWS::IAM::Role
Properties:
#RoleName: !Sub cb-${AWS::StackName}
Path: /
AssumeRolePolicyDocument: |
{
"Statement": [{
"Effect": "Allow",
"Principal": { "Service": [ "codebuild.amazonaws.com" ]},
"Action": [ "sts:AssumeRole" ]
}]
}
Policies:
- PolicyName: root
PolicyDocument:
Version: 2012-10-17
Statement:
- Resource: "*"
Effect: Allow
Action:
- logs:CreateLogGroup
- logs:CreateLogStream
- logs:PutLogEvents
- ecr:GetAuthorizationToken
- cloudformation:Describe*
- elasticloadbalancing:Describe*
- codepipeline:Get*
- codebuild:BatchGetBuilds
- Resource: !Sub arn:aws:s3:::${ArtifactBucket}/*
Effect: Allow
Action:
- s3:GetObject
- s3:PutObject
- s3:GetObjectVersion
- Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${Repository}
Effect: Allow
Action:
- ecr:GetDownloadUrlForLayer
- ecr:BatchGetImage
- ecr:BatchCheckLayerAvailability
- ecr:PutImage
- ecr:InitiateLayerUpload
- ecr:UploadLayerPart
- ecr:CompleteLayerUpload
CodePipelineServiceRole:
Type: AWS::IAM::Role
Properties:
#RoleName: !Sub cp-${AWS::StackName}
Path: /
AssumeRolePolicyDocument: |
{
"Statement": [{
"Effect": "Allow",
"Principal": { "Service": [ "codepipeline.amazonaws.com" ]},
"Action": [ "sts:AssumeRole" ]
}]
}
Policies:
- PolicyName: root
PolicyDocument:
Version: 2012-10-17
Statement:
- Resource:
- !Sub arn:aws:s3:::${ArtifactBucket}/*
- !Sub arn:aws:s3:::${TemplateBucket}
- !Sub arn:aws:s3:::${TemplateBucket}/*
Effect: Allow
Action:
- s3:PutObject
- s3:GetObject
- s3:GetObjectVersion
- s3:GetBucketVersioning
- Resource: "*"
Effect: Allow
Action:
- codebuild:StartBuild
- codebuild:BatchGetBuilds
- cloudformation:*
- lambda:*
- iam:PassRole
ArtifactBucket:
Type: AWS::S3::Bucket
DeletionPolicy: Retain
CodeBuildProject:
Type: AWS::CodeBuild::Project
Properties:
Artifacts:
Type: CODEPIPELINE
Source:
Type: CODEPIPELINE
BuildSpec: |
version: 0.1
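# NOTE: in buildspec version 0.1 each command runs in a separate shell instance; version 0.2 is the current format.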
phases:
# install:
# commands:
# - apt-get update -y
# - apt-get -y install python-pip
# - pip install --upgrade python
# - pip install --upgrade awscli
pre_build:
commands:
- printenv
- echo -n "$CODEBUILD_LOG_PATH" > /tmp/build_id.out
- printf "%s:%s" "$REPOSITORY_URI" "$(cat /tmp/build_id.out)" > /tmp/build_tag.out
- printf '{"tag":"%s"}' "$(cat /tmp/build_id.out)" > /tmp/build.json
# - $(aws ecr get-login-password)
build:
commands:
- ./mvnw package
- docker build --tag "$(cat /tmp/build_tag.out)" .
# - ./mvnw spring-boot:build-image
- docker image tag spring-petclinic:3.0.0-SNAPSHOT "$(cat /tmp/build_tag.out)"
post_build:
commands:
- docker push "$(cat /tmp/build_tag.out)"
artifacts:
files: /tmp/build.json
discard-paths: yes
Environment:
ComputeType: "BUILD_GENERAL1_SMALL"
Image: "aws/codebuild/standard:6.0"
Type: "LINUX_CONTAINER"
EnvironmentVariables:
- Name: AWS_DEFAULT_REGION
Value: !Ref AWS::Region
- Name: REPOSITORY_URI
Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository}
- Name: DOCKER_HOST
Value: unix:///var/run/docker.sock
Name: !Sub ${AWS::StackName}-codebuildproject
ServiceRole: !Ref CodeBuildServiceRole
BlueGreenElbDiscovery:
Type: AWS::CodeBuild::Project
Properties:
Artifacts:
Type: CODEPIPELINE
Source:
Type: CODEPIPELINE
BuildSpec: |
version: 0.1
phases:
install:
commands:
- pip install --upgrade python
- pip install --upgrade awscli
- pip install --upgrade boto3
pre_build:
commands:
- printenv
- ls -ld $(find .)
- python deployer.py
artifacts:
files: cf_inputs.json
discard-paths: yes
Environment:
ComputeType: "BUILD_GENERAL1_SMALL"
Image: aws/codebuild/python:2.7.12
Type: "LINUX_CONTAINER"
EnvironmentVariables:
- Name: AWS_DEFAULT_REGION
Value: !Ref AWS::Region
- Name: ELB_NAME
Value: !Ref GitHubRepo
Name: !Sub ${AWS::StackName}-discoveryproject
ServiceRole: !Ref CodeBuildServiceRole
Pipeline:
Type: AWS::CodePipeline::Pipeline
Properties:
RoleArn: !GetAtt CodePipelineServiceRole.Arn
ArtifactStore:
Type: S3
Location: !Ref ArtifactBucket
Stages:
- Name: Source
Actions:
- Name: App
ActionTypeId:
Category: Source
Owner: ThirdParty
Version: 1
Provider: GitHub
Configuration:
Owner: !Ref GitHubUser
Repo: !Ref GitHubRepo
Branch: !Ref GitHubBranch
OAuthToken: !Ref GitHubToken
OutputArtifacts:
- Name: App
RunOrder: 1
- Name: Template
ActionTypeId:
Category: Source
Owner: AWS
Version: 1
Provider: S3
OutputArtifacts:
- Name: Template
RunOrder: 1
Configuration:
S3Bucket: !Ref TemplateBucket
S3ObjectKey: templates.zip
- Name: Scripts
ActionTypeId:
Category: Source
Owner: AWS
Version: 1
Provider: S3
OutputArtifacts:
- Name: Scripts
RunOrder: 1
Configuration:
S3Bucket: !Ref TemplateBucket
S3ObjectKey: scripts.zip
- Name: Build
Actions:
- Name: Build
ActionTypeId:
Category: Build
Owner: AWS
Version: 1
Provider: CodeBuild
Configuration:
ProjectName: !Ref CodeBuildProject
InputArtifacts:
- Name: App
OutputArtifacts:
- Name: BuildOutput
RunOrder: 1
- Name: Discover
ActionTypeId:
Category: Build
Owner: AWS
Version: 1
Provider: CodeBuild
Configuration:
ProjectName: !Ref BlueGreenElbDiscovery
InputArtifacts:
- Name: Scripts
OutputArtifacts:
- Name: DiscoveryOutput
RunOrder: 2
- Name: Deploy-Approve-Swap
Actions:
- Name: Deploy
ActionTypeId:
Category: Deploy
Owner: AWS
Version: 1
Provider: CloudFormation
Configuration:
ChangeSetName: Deploy
ActionMode: CREATE_UPDATE
StackName: !Sub "${AWS::StackName}-ecs-cluster"
Capabilities: CAPABILITY_NAMED_IAM
TemplatePath: Template::templates/ecs-cluster.yaml
RoleArn: !GetAtt CloudFormationExecutionRole.Arn
ParameterOverrides: !Sub |
{
"Subnet1": "${Subnet1}",
"Subnet2": "${Subnet2}",
"VpcId": "${VpcId}",
"VpcCIDR": "${VpcCIDR}",
"Code1" : { "Fn::GetParam" : [ "DiscoveryOutput", "cf_inputs.json", "Code1" ] },
"Code2" : { "Fn::GetParam" : [ "DiscoveryOutput", "cf_inputs.json", "Code2" ] },
"Repository": "${Repository}",
"GitHubRepo": "${GitHubRepo}",
"TemplateBucket": "${TemplateBucket}"
}
InputArtifacts:
- Name: Template
- Name: DiscoveryOutput
RunOrder: 1
- Name: approve-blue-green-swap
ActionTypeId:
Category: Approval
Owner: AWS
Version: 1
Provider: Manual
Configuration:
CustomData: "Continue with blue-green swap ?"
RunOrder: 2
- Name: swap-target-group
ActionTypeId:
Category: Invoke
Owner: AWS
Version: 1
Provider: Lambda
Configuration:
FunctionName: !Ref BlueGreenFlipLambda
UserParameters: !Sub |
{
"ElbName": "${GitHubRepo}"
}
RunOrder: 3
Outputs:
PipelineUrl:
Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${Pipeline}

220 infra/templates/ecs-cluster.yaml Normal file

@@ -0,0 +1,220 @@
Description: >
Create ECS Cluster resources for Blue/Green Deployment on ECS. For further reference, please review BluegreenRepo ==> https://github.com/awslabs/ecs-blue-green-deployment
Parameters:
InstanceType:
Type: String
Default: t2.large
ClusterSize:
Type: Number
Default: 1
Subnet1:
Type: AWS::EC2::Subnet::Id
Subnet2:
Type: AWS::EC2::Subnet::Id
VpcId:
Type: AWS::EC2::VPC::Id
VpcCIDR:
Type: String
Code1:
Type: String
Default: latest
Code2:
Type: String
Default: latest
Repository:
Type: String
GitHubRepo:
Type: String
TemplateBucket:
Type: String
Description: >
S3 Bucket used for nested templates
Mappings:
AWSRegionToAMI:
us-east-1:
AMI: ami-71ef560b
us-east-2:
AMI: ami-1b8ca37e
us-west-1:
AMI: ami-e5cdf385
us-west-2:
AMI: ami-a64d9ade
eu-west-1:
AMI: ami-014ae578
eu-west-2:
AMI: ami-4f8d912b
eu-central-1:
AMI: ami-4255d32d
ap-northeast-1:
AMI: ami-3405af52
ap-southeast-1:
AMI: ami-134e0670
ap-southeast-2:
AMI: ami-2ab95148
ca-central-1:
AMI: ami-c802baac
Resources:
ECSRole:
Type: AWS::IAM::Role
Properties:
Path: /
#RoleName: !Sub ecs-${AWS::StackName}
AssumeRolePolicyDocument: |
{
"Statement": [{
"Effect": "Allow",
"Principal": { "Service": [ "ec2.amazonaws.com" ]},
"Action": [ "sts:AssumeRole" ]
}]
}
ManagedPolicyArns:
- arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role
InstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Path: /
Roles:
- !Ref ECSRole
SecurityGroup:
Type: "AWS::EC2::SecurityGroup"
Properties:
GroupDescription: !Sub ${AWS::StackName}-hosts
SecurityGroupIngress:
- SourceSecurityGroupId: !GetAtt LoadBalancer.Outputs.SecurityGroup
IpProtocol: -1
VpcId: !Ref VpcId
Cluster:
Type: AWS::ECS::Cluster
Properties:
ClusterName: !Ref AWS::StackName
AutoScalingGroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
VPCZoneIdentifier:
- !Ref Subnet1
- !Ref Subnet2
LaunchConfigurationName: !Ref LaunchConfiguration
MinSize: !Ref ClusterSize
MaxSize: !Ref ClusterSize
DesiredCapacity: !Ref ClusterSize
Tags:
- Key: Name
Value: !Sub ${AWS::StackName} - ECS Host
PropagateAtLaunch: true
CreationPolicy:
ResourceSignal:
Timeout: PT15M
UpdatePolicy:
AutoScalingRollingUpdate:
MinInstancesInService: 1
MaxBatchSize: 1
PauseTime: PT15M
WaitOnResourceSignals: true
LaunchConfiguration:
Type: AWS::AutoScaling::LaunchConfiguration
Metadata:
AWS::CloudFormation::Init:
config:
commands:
01_add_instance_to_cluster:
command: !Sub echo ECS_CLUSTER=${Cluster} > /etc/ecs/ecs.config
files:
"/etc/cfn/cfn-hup.conf":
mode: 000400
owner: root
group: root
content: !Sub |
[main]
stack=${AWS::StackId}
region=${AWS::Region}
"/etc/cfn/hooks.d/cfn-auto-reloader.conf":
content: !Sub |
[cfn-auto-reloader-hook]
triggers=post.update
path=Resources.ContainerInstances.Metadata.AWS::CloudFormation::Init
action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource LaunchConfiguration
services:
sysvinit:
cfn-hup:
enabled: true
ensureRunning: true
files:
- /etc/cfn/cfn-hup.conf
- /etc/cfn/hooks.d/cfn-auto-reloader.conf
Properties:
ImageId: !FindInMap [ AWSRegionToAMI, !Ref "AWS::Region", AMI ]
InstanceType: !Ref InstanceType
IamInstanceProfile: !Ref InstanceProfile
SecurityGroups:
- !Ref SecurityGroup
UserData:
"Fn::Base64": !Sub |
#!/bin/bash
yum install -y aws-cfn-bootstrap
/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource LaunchConfiguration
/opt/aws/bin/cfn-signal -e $? --region ${AWS::Region} --stack ${AWS::StackName} --resource AutoScalingGroup
LoadBalancer:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: !Sub https://s3.amazonaws.com/${TemplateBucket}/templates/load-balancer.yaml
Parameters:
Subnet1 : !Ref Subnet1
Subnet2 : !Ref Subnet2
VpcId: !Ref VpcId
VpcCIDR: !Ref VpcCIDR
Name: !Ref GitHubRepo
GreenService:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: !Sub https://s3.amazonaws.com/${TemplateBucket}/templates/service.yaml
Parameters:
Cluster: !Ref Cluster
LoadBalancer: !GetAtt LoadBalancer.Outputs.LoadBalancerArn
Port: 8080
Tag: !Ref Code2
DesiredCount: "1"
VpcId: !Ref VpcId
Repository: !Ref Repository
Identifier: "Code2"
IsProduction: False
BlueService:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: !Sub https://s3.amazonaws.com/${TemplateBucket}/templates/service.yaml
Parameters:
Cluster: !Ref Cluster
LoadBalancer: !GetAtt LoadBalancer.Outputs.LoadBalancerArn
Port: 80
Tag: !Ref Code1
DesiredCount: "1"
VpcId: !Ref VpcId
Repository: !Ref Repository
Identifier: "Code1"
IsProduction: True
Outputs:
ClusterName:
Value: !Ref Cluster

56 infra/templates/load-balancer.yaml Normal file

@@ -0,0 +1,56 @@
Description: >
Create Application Load Balancer resources for Blue/Green Deployment on ECS. For further reference, please review BluegreenRepo ==> https://github.com/awslabs/ecs-blue-green-deployment
Parameters:
VpcId:
Type: String
VpcCIDR:
Type: String
Subnet1:
Type: AWS::EC2::Subnet::Id
Subnet2:
Type: AWS::EC2::Subnet::Id
Name:
Type: String
Resources:
SecurityGroup:
Type: "AWS::EC2::SecurityGroup"
Properties:
GroupDescription: !Sub ${AWS::StackName}-alb
SecurityGroupIngress:
- CidrIp: "0.0.0.0/0"
IpProtocol: "TCP"
FromPort: 80
ToPort: 80
- CidrIp: "0.0.0.0/0"
# Change above to !Ref VpcCIDR, if you want to restrict the beta version of the traffic to VPC CIDRs only
IpProtocol: "TCP"
FromPort: 8080
ToPort: 8080
VpcId: !Ref VpcId
LoadBalancer:
Type: AWS::ElasticLoadBalancingV2::LoadBalancer
Properties:
Name: !Ref Name
Subnets:
- !Ref Subnet1
- !Ref Subnet2
SecurityGroups:
- !Ref SecurityGroup
Outputs:
ServiceUrl:
Description: URL of the load balancer for the sample service.
Value: !Sub http://${LoadBalancer.DNSName}
SecurityGroup:
Value: !Ref SecurityGroup
LoadBalancerArn:
Value: !Ref LoadBalancer

145 infra/templates/service.yaml Normal file

@@ -0,0 +1,145 @@
Description: >
Create ECS Service for Blue/Green Deployment on ECS. For further reference, please review BluegreenRepo ==> https://github.com/awslabs/ecs-blue-green-deployment
Parameters:
DesiredCount:
Type: Number
Default: 0
VpcId:
Type: AWS::EC2::VPC::Id
Cluster:
Type: String
LoadBalancer:
Type: String
Repository:
Type: String
Tag:
Type: String
Default: latest
Port:
Type: Number
Identifier:
Type: String
IsProduction:
Type: String
Resources:
LoadBalancerListener:
Type: AWS::ElasticLoadBalancingV2::Listener
Properties:
LoadBalancerArn: !Ref LoadBalancer
Port: !Ref Port
Protocol: HTTP
DefaultActions:
- Type: forward
TargetGroupArn: !Ref TargetGroup
TargetGroup:
Type: AWS::ElasticLoadBalancingV2::TargetGroup
Properties:
VpcId: !Ref VpcId
Port: !Ref Port
Protocol: HTTP
Matcher:
HttpCode: 200-299
HealthCheckIntervalSeconds: 10
HealthCheckPath: /
HealthCheckProtocol: HTTP
HealthCheckTimeoutSeconds: 5
HealthyThresholdCount: 2
TargetGroupAttributes:
- Key: deregistration_delay.timeout_seconds
Value: 30
Tags:
- Key: Identifier
Value: !Ref Identifier
- Key: Image
Value: !Ref Tag
- Key: IsProduction
Value: !Ref IsProduction
ListenerRule:
Type: AWS::ElasticLoadBalancingV2::ListenerRule
Properties:
Actions:
- Type: forward
TargetGroupArn: !Ref TargetGroup
Conditions:
- Field: path-pattern
Values:
- "*"
ListenerArn: !Ref LoadBalancerListener
Priority: 1
ECSServiceRole:
Type: AWS::IAM::Role
Properties:
#RoleName: !Sub ecs-service-${AWS::StackName}
Path: /
AssumeRolePolicyDocument: |
{
"Statement": [{
"Effect": "Allow",
"Principal": { "Service": [ "ecs.amazonaws.com" ]},
"Action": [ "sts:AssumeRole" ]
}]
}
ManagedPolicyArns:
- arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole
Service:
Type: AWS::ECS::Service
Properties:
Cluster: !Ref Cluster
Role: !Ref ECSServiceRole
DesiredCount: !Ref DesiredCount
TaskDefinition: !Ref TaskDefinition
LoadBalancers:
- ContainerName: simple-app
ContainerPort: 80
TargetGroupArn: !Ref TargetGroup
TaskDefinition:
Type: AWS::ECS::TaskDefinition
Properties:
#Family: !Sub ${AWS::StackName}-simple-app
ContainerDefinitions:
- Name: simple-app
Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository}:${Tag}
EntryPoint:
- /usr/sbin/apache2
- -D
- FOREGROUND
Essential: true
Memory: 128
MountPoints:
- SourceVolume: my-vol
ContainerPath: /var/www/my-vol
PortMappings:
- ContainerPort: 80
Environment:
- Name: Tag
Value: !Ref Tag
- Name: busybox
Image: busybox
EntryPoint:
- sh
- -c
Essential: false
Memory: 128
VolumesFrom:
- SourceContainer: simple-app
Command:
- /bin/sh -c "while true; do /bin/date > /var/www/my-vol/date; sleep 1; done"
Volumes:
- Name: my-vol

96 infra/templates/vpc.yaml Normal file

@@ -0,0 +1,96 @@
Description: >
Create VPC resources for Blue/Green Deployment on ECS. For further reference, please review BluegreenRepo ==> https://github.com/awslabs/ecs-blue-green-deployment
Parameters:
Name:
Type: String
VpcCIDR:
Type: String
Subnet1CIDR:
Type: String
Subnet2CIDR:
Type: String
Resources:
VPC:
Type: AWS::EC2::VPC
Properties:
CidrBlock: !Ref VpcCIDR
Tags:
- Key: Name
Value: !Ref Name
InternetGateway:
Type: AWS::EC2::InternetGateway
Properties:
Tags:
- Key: Name
Value: !Ref Name
InternetGatewayAttachment:
Type: AWS::EC2::VPCGatewayAttachment
Properties:
InternetGatewayId: !Ref InternetGateway
VpcId: !Ref VPC
Subnet1:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [ 0, !GetAZs ]
MapPublicIpOnLaunch: true
CidrBlock: !Ref Subnet1CIDR
Tags:
- Key: Name
Value: !Sub ${Name} (Public)
Subnet2:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [ 1, !GetAZs ]
MapPublicIpOnLaunch: true
CidrBlock: !Ref Subnet2CIDR
Tags:
- Key: Name
Value: !Sub ${Name} (Public)
RouteTable:
Type: AWS::EC2::RouteTable
Properties:
VpcId: !Ref VPC
Tags:
- Key: Name
Value: !Ref Name
DefaultRoute:
Type: AWS::EC2::Route
Properties:
RouteTableId: !Ref RouteTable
DestinationCidrBlock: 0.0.0.0/0
GatewayId: !Ref InternetGateway
Subnet1RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref RouteTable
SubnetId: !Ref Subnet1
Subnet2RouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref RouteTable
SubnetId: !Ref Subnet2
Outputs:
Subnet1:
Value: !Ref Subnet1
Subnet2:
Value: !Ref Subnet2
VpcId:
Value: !Ref VPC

pom.xml

@@ -129,10 +129,13 @@
</execution>
</executions>
</plugin>
<plugin>
<!-- <plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>3.1.2</version>
<configuration>
<suppressionsLocation>checkstyle-suppressions.xml</suppressionsLocation>
</configuration>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
@@ -162,7 +165,7 @@
</goals>
</execution>
</executions>
</plugin>
</plugin> -->
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>