diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index cb9f880f..fd9abdf5 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -7,14 +7,11 @@ on: branches: [ develop ] jobs: - build: - runs-on: ubuntu-latest - steps: - uses: actions/checkout@v2 - - name: Build aws/ecr-repository - run: cd blueprints/aws/ecr-repository && DOCKER_TAGS=$(date +%s),latest make build - - name: Build aws/ecs-task-definition - run: cd blueprints/aws/ecs-task-definition && DOCKER_TAGS=$(date +%s),latest make build + - name: Build aws/ecr + run: cd aws-ecr && DOCKER_TAGS=$(date +%s),latest make build + - name: Build aws/ecs + run: cd aws-ecs && DOCKER_TAGS=$(date +%s),latest make build diff --git a/.github/workflows/docker-publish-gh.yml b/.github/workflows/docker-publish-gh.yml index d4b53614..7c9d11e6 100644 --- a/.github/workflows/docker-publish-gh.yml +++ b/.github/workflows/docker-publish-gh.yml @@ -54,4 +54,4 @@ jobs: #docker tag $IMAGE_NAME $IMAGE_ID:$VERSION #docker push $IMAGE_ID:$VERSION - cd blueprints/aws/ecr-repository && make push + cd aws-ecr && make push diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index e4eca05d..8646ad85 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -22,8 +22,8 @@ jobs: - name: Log into registry run: echo "${{ secrets.DOCKERHUB_TOKEN }}" | docker login -u ${{ secrets.DOCKERHUB_USER }} --password-stdin - - name: Publish aws/ecr-repository - run: cd blueprints/aws/ecr-repository && make push + - name: Publish aws/ecr + run: cd aws-ecr && make push - - name: Publish aws/ecs-task-definition - run: cd blueprints/aws/ecs-task-definition && make push + - name: Publish aws/ecs + run: cd aws-ecs && make push diff --git a/aws-batch/README.md b/aws-batch/README.md new file mode 100644 index 00000000..81fac9f1 --- /dev/null +++ b/aws-batch/README.md @@ -0,0 +1,18 @@ +# AWS Batch role configuration + 
+Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create roles that has the following privileges: + +* Access for managing Batch jobs specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/aws-batch/job/README.md b/aws-batch/job/README.md new file mode 100644 index 00000000..54b29cec --- /dev/null +++ b/aws-batch/job/README.md @@ -0,0 +1,7 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/aws-batch/job/main.tf b/aws-batch/job/main.tf new file mode 100644 index 00000000..9c7c4296 --- /dev/null +++ b/aws-batch/job/main.tf @@ -0,0 +1,4 @@ +resource "aws_batch_job_definition" "" { + name = "" + type = "" +} diff --git a/aws-batch/main.tf b/aws-batch/main.tf new file mode 100644 index 00000000..aec7f844 --- /dev/null +++ b/aws-batch/main.tf @@ -0,0 +1,44 @@ +/** + * # AWS Batch role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create roles that has the following privileges: + * + * * Access for managing Batch jobs specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "batch_admin" { + name = "bedrock-awsbatch-admin" + assume_role_policy = "${data.aws_iam_policy_document.assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = "${aws_iam_role.batch_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-passrole" + role = "${aws_iam_role.batch_admin.id}" +} + +resource "aws_iam_role_policy_attachment" "ec2_instance_profile_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ec2-instance-profile-fullaccess" + role = "${aws_iam_role.batch_admin.id}" +} diff --git a/blueprints/aws/iam/poweruser/provider.tf b/aws-batch/provider.tf similarity index 100% rename from blueprints/aws/iam/poweruser/provider.tf rename to aws-batch/provider.tf diff --git a/aws-chime/notification/README.md b/aws-chime/notification/README.md new file mode 100644 index 00000000..d77d13f8 --- /dev/null +++ b/aws-chime/notification/README.md @@ -0,0 +1,13 @@ +# AWS Lambda function configuration + +Deploy a lambda function. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | +| webhook\_url | Chime webhook URL | string | - | yes | + diff --git a/aws-chime/notification/lambda/ChimeNotification.py b/aws-chime/notification/lambda/ChimeNotification.py new file mode 100644 index 00000000..ec3d22eb --- /dev/null +++ b/aws-chime/notification/lambda/ChimeNotification.py @@ -0,0 +1,21 @@ +import os + +import requests + +webhook_url = os.environ['WebhookUrl'] + + +def lambda_handler(event, context): + notify(webhook_url, get_message(event)) + + +def get_message(event): + if 'Records' in event: + return event['Records'][0]['Sns']['Subject'] + + return event['Message'] + + +def notify(url, content): + print(f"Publishing notification: {content}") + requests.post(url=url, json={'Content': content}) diff --git a/aws-chime/notification/main.tf b/aws-chime/notification/main.tf new file mode 100644 index 00000000..43cbe1ca --- /dev/null +++ b/aws-chime/notification/main.tf @@ -0,0 +1,35 @@ +/** + * # AWS Lambda function configuration + * + * Deploy a lambda function. 
+ */ +data "archive_file" "chime_notification" { + output_path = "chime_notification.zip" + type = "zip" + source_dir = "${var.lambda_path}" +} + +data "aws_iam_role" "chime_notification" { + name = "bedrock-chime-notification-role" +} + +resource "aws_lambda_function" "chime_notification" { + function_name = "ChimeNotification" + handler = "ChimeNotification.lambda_handler" + filename = "${data.archive_file.chime_notification.output_path}" + role = "${data.aws_iam_role.chime_notification.arn}" + runtime = "python3.6" + source_code_hash = "${data.archive_file.chime_notification.output_base64sha256}" + layers = ["arn:aws:lambda:ap-southeast-2:976651329757:layer:python-requests:2"] + + environment { + variables { + WebhookUrl = "${var.webhook_url}" + } + } +} + +resource "aws_cloudwatch_log_group" "chime_notification" { + name = "/aws/lambda/${aws_lambda_function.chime_notification.function_name}" + retention_in_days = 30 +} diff --git a/aws-chime/notification/vars.tf b/aws-chime/notification/vars.tf new file mode 100644 index 00000000..c3d8237b --- /dev/null +++ b/aws-chime/notification/vars.tf @@ -0,0 +1,8 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} + +variable "webhook_url" { + description = "Chime webhook URL" +} diff --git a/blueprints/nginx/reverseproxy/aws/provider.tf b/aws-chime/provider.tf similarity index 92% rename from blueprints/nginx/reverseproxy/aws/provider.tf rename to aws-chime/provider.tf index bab8017e..1ac0fd10 100644 --- a/blueprints/nginx/reverseproxy/aws/provider.tf +++ b/aws-chime/provider.tf @@ -1,7 +1,7 @@ provider "aws" { version = ">= 2.7.0" assume_role { - role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-nginx-admin" + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-chime-admin" } } diff --git a/aws-cloudfront/distribution/README.md b/aws-cloudfront/distribution/README.md new file mode 100644 index 00000000..26e8c64f --- /dev/null +++ 
b/aws-cloudfront/distribution/README.md @@ -0,0 +1,16 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| access\_log\_bucket | An S3 bucket used as a target for access logs | string | `` | no | +| aliases | A list of associated domain names that reference the distribution | list | `` | no | +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| bucket\_name | Name of target S3 bucket | string | - | yes | +| default\_root\_object | The default page when accessing the root URL of the distribution | string | `index.html` | no | +| default\_ttl | Default time-to-live (TTL) for objects in cache | string | `86400` | no | +| enabled | Indicates if distribution is enabled | string | `false` | no | +| error\_page | Error page returned for 404 errors | string | - | yes | +| hosted\_zone | Route53 zone for alias domain names | string | - | yes | +| price\_class | Specifies the edge locations based on price class | string | `PriceClass_100` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-cloudfront/distribution/main.tf b/aws-cloudfront/distribution/main.tf new file mode 100644 index 00000000..24c26404 --- /dev/null +++ b/aws-cloudfront/distribution/main.tf @@ -0,0 +1,108 @@ +data "aws_caller_identity" "current" {} + +data "aws_s3_bucket" "access_log" { + bucket = replace(var.access_log_bucket, "/\\A\\z/", format("%s-access-logs", data.aws_caller_identity.current.account_id)) +} + +data "aws_s3_bucket" "bucket" { + bucket = var.bucket_name +} + +data "aws_route53_zone" "primary" { + name = "${var.hosted_zone}." 
+} + +//Function must live in us-east-1 and Terraform can't lookup across regions +//data "aws_lambda_function" "url_rewrite" { +// function_name = "CloudFrontRewrite" +//} + +resource "aws_cloudfront_distribution" "distribution" { + enabled = var.enabled + price_class = var.price_class + default_root_object = var.default_root_object + + custom_error_response { + error_code = 404 + response_page_path = var.error_page + response_code = 404 + } + + aliases = var.aliases + + origin = { + domain_name = data.aws_s3_bucket.bucket.bucket_domain_name + origin_id = "S3-${data.aws_s3_bucket.bucket.bucket}" + } + + default_cache_behavior = { + viewer_protocol_policy = "allow-all" + allowed_methods = ["GET", "HEAD"] + cached_methods = ["GET", "HEAD"] + + forwarded_values = { + cookies = { + forward = "none" + } + + query_string = false + } + + target_origin_id = "S3-${data.aws_s3_bucket.bucket.bucket}" + default_ttl = var.default_ttl + } + + ordered_cache_behavior { + path_pattern = "*+*" + viewer_protocol_policy = "allow-all" + allowed_methods = ["GET", "HEAD"] + cached_methods = ["GET", "HEAD"] + + forwarded_values = { + cookies = { + forward = "none" + } + + query_string = false + } + + target_origin_id = "S3-${data.aws_s3_bucket.bucket.bucket}" + default_ttl = var.default_ttl + + lambda_function_association { + event_type = "viewer-request" + lambda_arn = "arn:aws:lambda:us-east-1:${data.aws_caller_identity.current.account_id}:function:CloudFrontRewrite:1" + include_body = false + } + } + + logging_config { + bucket = data.aws_s3_bucket.access_log.bucket_domain_name + prefix = "cloudfront-${data.aws_s3_bucket.bucket.bucket}/" + } + + restrictions { + geo_restriction { + restriction_type = "none" + } + } + + viewer_certificate { + cloudfront_default_certificate = true + + // minimum_protocol_version = "TLSv1.2_2018" + } +} + +resource "aws_route53_record" "www" { + count = length(var.aliases) + zone_id = data.aws_route53_zone.primary.zone_id + name = element(var.aliases, 
count.index) + type = "CNAME" + + alias { + evaluate_target_health = false + name = aws_cloudfront_distribution.distribution.id + zone_id = data.aws_route53_zone.primary.zone_id + } +} diff --git a/aws-cloudfront/distribution/provider.tf b/aws-cloudfront/distribution/provider.tf new file mode 100644 index 00000000..5f4f0e4c --- /dev/null +++ b/aws-cloudfront/distribution/provider.tf @@ -0,0 +1,16 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-cloudfront-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/aws-cloudfront/provider.tf b/aws-cloudfront/provider.tf new file mode 100644 index 00000000..46aa6ff6 --- /dev/null +++ b/aws-cloudfront/provider.tf @@ -0,0 +1,16 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-ec2-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/aws-cloudfront/rewrite/README.md b/aws-cloudfront/rewrite/README.md new file mode 100644 index 00000000..2a794d29 --- /dev/null +++ b/aws-cloudfront/rewrite/README.md @@ -0,0 +1,12 @@ +# AWS Lambda function configuration + +Deploy a lambda function. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-cloudfront/rewrite/lambda/CloudFrontRewrite.js b/aws-cloudfront/rewrite/lambda/CloudFrontRewrite.js new file mode 100644 index 00000000..53e93a74 --- /dev/null +++ b/aws-cloudfront/rewrite/lambda/CloudFrontRewrite.js @@ -0,0 +1,5 @@ +exports.handler = (event, context, callback) => { + const request = event.Records[0].cf.request; + request.uri = request.uri.replace(/\+/g, '%2B'); + callback(null, request); +}; diff --git a/aws-cloudfront/rewrite/lambda/CloudFrontRewrite.py b/aws-cloudfront/rewrite/lambda/CloudFrontRewrite.py new file mode 100644 index 00000000..7b3299c0 --- /dev/null +++ b/aws-cloudfront/rewrite/lambda/CloudFrontRewrite.py @@ -0,0 +1,6 @@ +import re + +def lambda_handler(event, context): + request = event['Records'][0]['cf']['request'] + request.uri = re.sub(r"\+", "%2B", request.uri) + return request diff --git a/aws-cloudfront/rewrite/main.tf b/aws-cloudfront/rewrite/main.tf new file mode 100644 index 00000000..e3ed7510 --- /dev/null +++ b/aws-cloudfront/rewrite/main.tf @@ -0,0 +1,28 @@ +/** + * # AWS Lambda function configuration + * + * Deploy a lambda function. 
+ */ +data "archive_file" "cloudfront_rewrite" { + output_path = "cloudfront_rewrite.zip" + type = "zip" + source_dir = "${format("%s", var.lambda_path)}" +} + +data "aws_iam_role" "cloudfront_rewrite" { + name = "bedrock-cloudfront-rewrite-role" +} + +resource "aws_lambda_function" "cloudfront_rewrite" { + function_name = "CloudFrontRewrite" + handler = "CloudFrontRewrite.handler" + filename = "${data.archive_file.cloudfront_rewrite.output_path}" + role = "${data.aws_iam_role.cloudfront_rewrite.arn}" + runtime = "nodejs8.10" + source_code_hash = "${data.archive_file.cloudfront_rewrite.output_base64sha256}" +} + +resource "aws_cloudwatch_log_group" "cloudfront_rewrite" { + name = "/aws/lambda/${aws_lambda_function.cloudfront_rewrite.function_name}" + retention_in_days = 30 +} diff --git a/aws-cloudfront/rewrite/vars.tf b/aws-cloudfront/rewrite/vars.tf new file mode 100644 index 00000000..4b0fb0d1 --- /dev/null +++ b/aws-cloudfront/rewrite/vars.tf @@ -0,0 +1,4 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} diff --git a/aws-cloudmap/README.md b/aws-cloudmap/README.md new file mode 100644 index 00000000..96f5d92b --- /dev/null +++ b/aws-cloudmap/README.md @@ -0,0 +1,21 @@ +## Requirements + +No requirements. + +## Providers + +No provider. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| name | Namespace name | `any` | n/a | yes | +| private\_zone | Name of the private zone for this namespace | `any` | n/a | yes | +| public\_zone | Name of the public zone for this namespace | `any` | n/a | yes | +| vpc | VPC identifer for private DNS | `any` | n/a | yes | + +## Outputs + +No output. 
+ diff --git a/aws-cloudmap/main.tf b/aws-cloudmap/main.tf new file mode 100644 index 00000000..7b3f9fe1 --- /dev/null +++ b/aws-cloudmap/main.tf @@ -0,0 +1,16 @@ +module "public_dns" { + source = "figurate/service-discovery-namespace/aws" + + name = var.public_zone + description = "Public DNS for ${var.name} namespace" + namespace_type = "public" +} + +module "private_dns" { + source = "figurate/service-discovery-namespace/aws" + + name = var.private_zone + description = "Private DNS for ${var.name} namespace" + namespace_type = "private" + vpc = var.vpc +} diff --git a/aws-cloudmap/vars.tf b/aws-cloudmap/vars.tf new file mode 100644 index 00000000..0e2f8b8e --- /dev/null +++ b/aws-cloudmap/vars.tf @@ -0,0 +1,15 @@ +variable "name" { + description = "Namespace name" +} + +variable "public_zone" { + description = "Name of the public zone for this namespace" +} + +variable "private_zone" { + description = "Name of the private zone for this namespace" +} + +variable "vpc" { + description = "VPC identifer for private DNS" +} diff --git a/aws-cloudwatch/log/README.md b/aws-cloudwatch/log/README.md new file mode 100644 index 00000000..ec2a5c25 --- /dev/null +++ b/aws-cloudwatch/log/README.md @@ -0,0 +1,29 @@ +Provision a CloudWatch Log Group + +## Requirements + +| Name | Version | +|------|---------| +| aws | >= 2.7.0 | + +## Providers + +| Name | Version | +|------|---------| +| aws | >= 2.7.0 | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| assume\_role\_account | AWS account ID for the role to assume into | `any` | `null` | no | +| context | Contextual naming of the log group (eg. user, service, etc.) 
| `string` | `""` | no | +| environment | The name of the environment associated with the log group | `any` | n/a | yes | +| retention\_days | How many days to retain logs in the log group | `number` | `14` | no | +| service\_name | The log group name | `any` | n/a | yes | +| service\_type | The type of service this log group is for (used to prefix the name) | `string` | `""` | no | + +## Outputs + +No output. + diff --git a/aws-cloudwatch/main.tf b/aws-cloudwatch/main.tf new file mode 100644 index 00000000..252d2901 --- /dev/null +++ b/aws-cloudwatch/main.tf @@ -0,0 +1,10 @@ +resource "aws_cloudwatch_log_group" "log" { + name = "cloudtrail" + retention_in_days = var.log_retention_in_days +} + +module "alarms" { + source = "nozaq/secure-baseline/aws//modules/alarm-baseline" + + cloudtrail_log_group_name = aws_cloudwatch_log_group.log.name +} diff --git a/aws-cloudwatch/provider.tf b/aws-cloudwatch/provider.tf new file mode 100644 index 00000000..22411a80 --- /dev/null +++ b/aws-cloudwatch/provider.tf @@ -0,0 +1,16 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-cloudwatch-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/aws-cloudwatch/trigger/README.md b/aws-cloudwatch/trigger/README.md new file mode 100644 index 00000000..ac774a15 --- /dev/null +++ b/aws-cloudwatch/trigger/README.md @@ -0,0 +1,12 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| description | Description of the trigger event rule | string | - | yes | +| function\_input | A map of values passed to the function invocation | map | `` | no | +| function\_name | Name of the Lambda function triggered by 
bucket changes | string | - | yes | +| region | AWS default region | string | - | yes | +| trigger\_name | Name of the trigger event rule | string | - | yes | +| trigger\_schedule | CRON expression for trigger | string | - | yes | + diff --git a/aws-cloudwatch/trigger/main.tf b/aws-cloudwatch/trigger/main.tf new file mode 100644 index 00000000..643a3d90 --- /dev/null +++ b/aws-cloudwatch/trigger/main.tf @@ -0,0 +1,24 @@ +data "aws_lambda_function" "trigger" { + function_name = "${var.function_name}" +} + +resource "aws_cloudwatch_event_rule" "event_rule" { + name = "${var.trigger_name}" + description = "${var.description}" + schedule_expression = "${format("cron(%s)", var.trigger_schedule)}" + role_arn = "" +} + +resource "aws_cloudwatch_event_target" "event_target" { + arn = data.aws_lambda_function.trigger.arn + rule = aws_cloudwatch_event_rule.event_rule.name + input = jsonencode(var.function_input) +} + +resource "aws_lambda_permission" "trigger" { + statement_id = "${var.trigger_name}" + action = "lambda:InvokeFunction" + function_name = "${data.aws_lambda_function.trigger.function_name}" + principal = "events.amazonaws.com" + source_arn = "${aws_cloudwatch_event_rule.event_rule.arn}" +} diff --git a/aws-cloudwatch/trigger/vars.tf b/aws-cloudwatch/trigger/vars.tf new file mode 100644 index 00000000..c30a78e0 --- /dev/null +++ b/aws-cloudwatch/trigger/vars.tf @@ -0,0 +1,21 @@ +variable "trigger_name" { + description = "Name of the trigger event rule" +} + +variable "description" { + description = "Description of the trigger event rule" +} + +variable "trigger_schedule" { + description = "CRON expression for trigger" +} + +variable "function_name" { + description = "Name of the Lambda function triggered by bucket changes" +} + +variable "function_input" { + description = "A map of values passed to the function invocation" + type = "map" + default = {} +} diff --git a/aws-cloudwatch/vars.tf b/aws-cloudwatch/vars.tf new file mode 100644 index 00000000..07a1de14 
--- /dev/null +++ b/aws-cloudwatch/vars.tf @@ -0,0 +1,4 @@ +variable "log_retention_in_days" { + description = "How long to retain CloudWatch logs" + default = 14 +} diff --git a/aws-codebuild/README.md b/aws-codebuild/README.md new file mode 100644 index 00000000..9743fb4b --- /dev/null +++ b/aws-codebuild/README.md @@ -0,0 +1,23 @@ +# CI/CD Build + +## Synopsis + +The principles of Continuous Integration and Continuous Deployment are similar: + +* Automate repetitive processes +* Avoid human/machine interaction whenever possible + +The benefits of these principles are: + +* Improved efficiency of processes that computers can perform faster than humans +* Reduced error rates and consistency + +## Purpose + +This blueprint is intended to provide support for CI/CD builds within a Cloud +tenancy. Builds that are contained within the account boundary are more +secure as they can be used to restrict permissions of external API users. + +By allowing external users the ability to trigger builds only, tenancy +modifications are securely controlled and audited. 
+ \ No newline at end of file diff --git a/aws-codebuild/blueprint/README.md b/aws-codebuild/blueprint/README.md new file mode 100644 index 00000000..778778ca --- /dev/null +++ b/aws-codebuild/blueprint/README.md @@ -0,0 +1,39 @@ +# AWS Codebuild configuration + +Provision a codebuild with the following features: + + - Dedicated user with limited permissions + - Build parameters stored in SSM Parameter Store + - KMS encryption of sensitive build parameters + - Configurable build container + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| aws\_access\_key | IAM access key used by codebuild to execute the blueprint | string | - | yes | +| aws\_secret\_key | IAM secret access key used by codebuild to execute the blueprint | string | - | yes | +| blueprints\_bucket | S3 bucket containing blueprint packages | string | - | yes | +| build\_timeout | Maximum build time in minutes | string | `5` | no | +| buildspec | Build specification content | string | `version 0.2 + +phases: + build: + commands: + - docker run --privileged -it --rm \ + --mount type=bind,source="$(pwd)",target=/work \ + -e TF_BACKEND_KEY=$BLUEPRINT/\${TF_BACKEND_KEY:-$(basename $PWD)} \ + -e TF_APPLY_ARGS="\${TF_APPLY_ARGS}" \ + -e AWS_PROFILE=\${AWS_PROFILE-iamadmin} \ + -e TF_VAR_region=\${AWS_DEFAULT_REGION} \ + -e http_proxy=\${http_proxy:-} \ + -e https_proxy=\${https_proxy:-} \ + -e no_proxy=\${no_proxy:-} \ + --net=host \ + bedrock/$BLUEPRINT $@ +` | no | +| codebuild\_image | Docker image used to run build specs | string | `aws/codebuild/docker:17.09.0` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-codebuild/blueprint/main.tf b/aws-codebuild/blueprint/main.tf new file mode 100644 index 00000000..50ea3959 --- /dev/null +++ b/aws-codebuild/blueprint/main.tf @@ -0,0 +1,63 @@ +/** + * # AWS Codebuild 
configuration + * + * Provision a codebuild with the following features: + * + * - Dedicated user with limited permissions + * - Build parameters stored in SSM Parameter Store + * - KMS encryption of sensitive build parameters + * - Configurable build container + * + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "codebuild" { + name = "bedrock-codebuild-role" +} + +data "aws_s3_bucket" "blueprints" { + bucket = "${data.aws_caller_identity.current.account_id}-${var.blueprints_bucket}" +} + +resource "aws_codebuild_project" "blueprint" { + name = "blueprint" + description = "Provision infrastructure with bedrock blueprint" + build_timeout = "${var.build_timeout}" + service_role = "${data.aws_iam_role.codebuild.arn}" + encryption_key = "${aws_kms_key.build_params.id}" + source { + type = "NO_SOURCE" + buildspec = "${var.buildspec}" + } + secondary_sources { + type = "S3" + source_identifier = "package" + location = "${data.aws_s3_bucket.blueprints.bucket}/package.zip" + } + artifacts { + type = "NO_ARTIFACTS" + } + environment { + compute_type = "BUILD_GENERAL1_SMALL" + image = "${var.codebuild_image}" + type = "LINUX_CONTAINER" + privileged_mode = true + environment_variable { + name = "AWS_DEFAULT_REGION" + value = "${var.region}" + } + environment_variable { + name = "AWS_ACCESS_KEY_ID" + value = "${aws_ssm_parameter.aws_access_key.name}" + type = "PARAMETER_STORE" + } + environment_variable { + name = "AWS_SECRET_ACCESS_KEY" + value = "${aws_ssm_parameter.aws_secret_key.name}" + type = "PARAMETER_STORE" + } + } + tags { + Environment = "Test" + } +} diff --git a/aws-codebuild/blueprint/ssm.tf b/aws-codebuild/blueprint/ssm.tf new file mode 100644 index 00000000..e5c7cd8f --- /dev/null +++ b/aws-codebuild/blueprint/ssm.tf @@ -0,0 +1,36 @@ +resource "aws_kms_key" "build_params" { + description = "Encryption key for sensitive build parameters" + enable_key_rotation = true + tags { + Consumer = "codebuild" + } +} + +resource "aws_kms_alias" 
"build_params" { + target_key_id = "${aws_kms_key.build_params.id}" + name = "alias/blueprint-params" +} + +resource "aws_ssm_parameter" "aws_access_key" { + name = "/blueprint/aws_access_key" + description = "AWS access key for blueprint builds" + type = "SecureString" + key_id = "${aws_kms_key.build_params.arn}" + value = "${var.aws_access_key}" + overwrite = true + tags { + Consumer = "codebuild" + } +} + +resource "aws_ssm_parameter" "aws_secret_key" { + name = "/blueprint/aws_secret_key" + description = "AWS access secret key for blueprint builds" + type = "SecureString" + key_id = "${aws_kms_key.build_params.arn}" + value = "${var.aws_secret_key}" + overwrite = true + tags { + Consumer = "codebuild" + } +} diff --git a/aws-codebuild/blueprint/vars.tf b/aws-codebuild/blueprint/vars.tf new file mode 100644 index 00000000..af8609ca --- /dev/null +++ b/aws-codebuild/blueprint/vars.tf @@ -0,0 +1,56 @@ +variable "codebuild_image" { + description = "Docker image used to run build specs" + default = "aws/codebuild/docker:17.09.0" +} + +variable "region" { + description = "AWS default region" +} + +//variable "build_type" { +// description = "Indicates the buildspec to use for the build job" +// default = "blueprint" +//} + +variable "build_timeout" { + description = "Maximum build time in minutes" + default = "5" +} + +//variable "codecommit_repo" { +// description = "The source repository for the codebuild specification" +//} + +variable "blueprints_bucket" { + description = "S3 bucket containing blueprint packages" +} + +variable "buildspec" { + description = "Build specification content" + default = <` | no | + diff --git a/aws-config/env/cloudformation/config-rule.yml b/aws-config/env/cloudformation/config-rule.yml new file mode 100644 index 00000000..3e9394ea --- /dev/null +++ b/aws-config/env/cloudformation/config-rule.yml @@ -0,0 +1,27 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning a Config rule. 
+ +Parameters: + Environment: + Type: String + Description: The name of the environment represented by the VPC + ManagedRuleId: + Type: String + Description: The identifier for an AWS-managed rule + +Resources: + Rule: + Type: AWS::Config::Rule + Properties: + ConfigRuleName: + Description: + InputParameters: + Scope: + TagKey: 'Environment' + TagValue: !Ref Environment + Source: + Owner: 'AWS' + SourceIdentifier: !Ref ManagedRuleId + +Outputs: diff --git a/aws-config/env/export.sh b/aws-config/env/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-config/env/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-config/env/main.tf b/aws-config/env/main.tf new file mode 100644 index 00000000..b27063cc --- /dev/null +++ b/aws-config/env/main.tf @@ -0,0 +1,14 @@ +/** + * # AWS Config Rule configuration + * + * Provision a Config Rule. 
+ */ +resource "aws_cloudformation_stack" "config_rule" { + count = "${length(var.rule_ids)}" + name = "${var.environment}-config_rule-${count.index}" + parameters { + Environment = "${var.environment}" + ManagedRuleId = "${element(var.rule_ids, count.index)}" + } + template_body = "${file(format("%s/config-rule.yml", var.cloudformation_path))}" +} diff --git a/blueprints/aws/rds/cycle/provider.tf b/aws-config/env/provider.tf similarity index 100% rename from blueprints/aws/rds/cycle/provider.tf rename to aws-config/env/provider.tf diff --git a/aws-config/env/vars.tf b/aws-config/env/vars.tf new file mode 100644 index 00000000..4aa8c076 --- /dev/null +++ b/aws-config/env/vars.tf @@ -0,0 +1,10 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} + +variable "rule_ids" { + type = list(string) + description = "A list of identifers for AWS-managed rules, e.g. _EC2_INSTANCE_MANAGED_BY_SSM_" + default = [] +} diff --git a/blueprints/aws/rds/instance/provider.tf b/aws-config/provider.tf similarity index 100% rename from blueprints/aws/rds/instance/provider.tf rename to aws-config/provider.tf diff --git a/aws-config/rule/README.md b/aws-config/rule/README.md new file mode 100644 index 00000000..64562084 --- /dev/null +++ b/aws-config/rule/README.md @@ -0,0 +1,11 @@ +# AWS Config Rule configuration + +Provision a Config Rule. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| region | AWS default region | string | - | yes | +| rule\_ids | A list of identifers for AWS-managed rules, e.g. _S3_BUCKET_PUBLIC_WRITE_PROHIBITED_ | list | `` | no | + diff --git a/aws-config/rule/main.tf b/aws-config/rule/main.tf new file mode 100644 index 00000000..f1be137a --- /dev/null +++ b/aws-config/rule/main.tf @@ -0,0 +1,13 @@ +/** + * # AWS Config Rule configuration + * + * Provision a Config Rule. 
+ */ +resource "aws_config_config_rule" "config_rule" { + count = "${length(var.rule_ids)}" + name = "global-rule-${count.index}" + source { + owner = "AWS" + source_identifier = "${element(var.rule_ids, count.index)}" + } +} diff --git a/aws-config/rule/vars.tf b/aws-config/rule/vars.tf new file mode 100644 index 00000000..059a5d93 --- /dev/null +++ b/aws-config/rule/vars.tf @@ -0,0 +1,5 @@ +variable "rule_ids" { + type = "list" + description = "A list of identifers for AWS-managed rules, e.g. _S3_BUCKET_PUBLIC_WRITE_PROHIBITED_" + default = [] +} diff --git a/aws-dynamodb/Makefile b/aws-dynamodb/Makefile new file mode 100644 index 00000000..578038df --- /dev/null +++ b/aws-dynamodb/Makefile @@ -0,0 +1,50 @@ +SHELL:=/bin/bash +AWS_DEFAULT_REGION?=ap-southeast-2 + +TERRAFORM_VERSION=0.13.4 +TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}:/root" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work hashicorp/terraform:$(TERRAFORM_VERSION) + +TERRAFORM_DOCS=docker run --rm -v "${PWD}:/work" tmknom/terraform-docs + +CHECKOV=docker run --rm -v "${PWD}:/work" bridgecrew/checkov + +TFSEC=docker run --rm -v "${PWD}:/work" liamg/tfsec + +DIAGRAMS=docker run -v "${PWD}:/work" figurate/diagrams python + +NAME=$(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS)) + +REGISTRY?=bedrock +BLUEPRINT_ID=dynamodb-table +TAGS?=latest +BUILD_ARGS?= + +.PHONY: all build tag push + +all: build + +clean: + docker rmi $(REGISTRY)/$(BLUEPRINT_ID) + +validate: + $(TERRAFORM) init -upgrade && $(TERRAFORM) validate + +test: validate + $(CHECKOV) -d /work + + $(TFSEC) /work + +docs: diagram + $(TERRAFORM_DOCS) markdown ./ >./README.md + +format: + $(TERRAFORM) fmt -list=true ./ + +build: validate + docker build -t $(REGISTRY)/$(BLUEPRINT_ID) ${BUILD_ARGS} --build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) --build-arg HTTP_PROXY=${http_proxy} --network=host . 
+ +tag: build + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker tag $(REGISTRY)/$(BLUEPRINT_ID) $(REGISTRY)/$(BLUEPRINT_ID):% + +push: tag + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker push $(REGISTRY)/$(BLUEPRINT_ID):% diff --git a/blueprints/aws/cloudfront/rewrite/Dockerfile b/aws-dynamodb/import/Dockerfile similarity index 100% rename from blueprints/aws/cloudfront/rewrite/Dockerfile rename to aws-dynamodb/import/Dockerfile diff --git a/aws-dynamodb/import/README.md b/aws-dynamodb/import/README.md new file mode 100644 index 00000000..1d16af08 --- /dev/null +++ b/aws-dynamodb/import/README.md @@ -0,0 +1,18 @@ +# DynamoDB Batch Import + +Support processing of data files of various formats (e.g. CSV) to populate a DynamoDB table. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| auto\_generate\_key | Indicates whether the import function should automatically generate a unique partition key | string | `true` | no | +| data\_types | A map of column names with applicable DynamoDB data type | map | `` | no | +| function\_name | A unique name used to reference the function | string | - | yes | +| import\_timeout | The maximum time (seconds) to allow the import job to execute | string | `30` | no | +| item\_template | A template map used to initialise each item | map | `` | no | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | +| table\_name | The name of the DynamoDB table to import into | string | - | yes | + diff --git a/aws-dynamodb/import/lambda/DynamoDBImportCsv.py b/aws-dynamodb/import/lambda/DynamoDBImportCsv.py new file mode 100644 index 00000000..855f2d09 --- /dev/null +++ b/aws-dynamodb/import/lambda/DynamoDBImportCsv.py @@ -0,0 +1,65 @@ +""" +DynamoDB Import Comma-separated values (CSV) document. 
+
+An AWS Lambda function used to import batch records to a DynamoDB table.
+"""
+import csv
+import json
+import os
+import uuid
+
+import boto3
+from botocore.exceptions import ClientError
+
+data_types = json.loads(os.environ['DataTypes'])
+table_name = os.environ['TableName']
+item_template = json.loads(os.environ['ItemTemplate'])
+auto_generate_key = os.environ['AutoGenerateKey'].lower() in ('true', '1')
+
+
+def lambda_handler(event, context):
+    bucket = event['Records'][0]['s3']['bucket']['name']
+    filename = event['Records'][0]['s3']['object']['key']
+
+    import_file(bucket, filename)
+
+
+def import_file(bucket, filename):
+    s3 = boto3.client('s3')
+    dynamodb = boto3.client('dynamodb')
+    try:
+        data = s3.get_object(Bucket=bucket, Key=filename)['Body'].read().decode('utf-8')
+        csv_reader = csv.reader(data.splitlines(), delimiter=',')
+        columns = None
+        linenum = 0
+        for row in csv_reader:
+            if linenum == 0:
+                # use header row to identify the columns..
+                columns = row
+            else:
+                import_row(row2map(columns, row), dynamodb)
+            linenum += 1
+    except ClientError as e:
+        print(e)
+
+
+def row2map(columns, row):
+    retval = {}
+    if (auto_generate_key):
+        retval['UUID'] = uuid.uuid4().hex
+    for key, value in item_template.items():
+        data_type = data_types[key] if key in data_types else 'S'
+        retval[key] = {data_type: value}
+
+    for column in columns:
+        if len(row[columns.index(column)]) > 0:
+            data_type = data_types[column] if column in data_types else 'S'
+            retval[column] = {data_type: row[columns.index(column)]}
+    return retval
+
+
+def import_row(row, client):
+    try:
+        client.put_item(TableName=table_name, Item=row)
+    except ClientError as e:
+        print(e)
diff --git a/aws-dynamodb/import/main.tf b/aws-dynamodb/import/main.tf
new file mode 100644
index 00000000..1ef968c9
--- /dev/null
+++ b/aws-dynamodb/import/main.tf
@@ -0,0 +1,39 @@
+/**
+ * # DynamoDB Batch Import
+ *
+ * Support processing of data files of various formats (e.g. CSV) to populate a DynamoDB table.
+ */ +data "aws_caller_identity" "current" {} + +data "archive_file" "import" { + output_path = "dynamodb_import.zip" + type = "zip" + source_dir = "${var.lambda_path}" +} + +data "aws_iam_role" "import" { + name = "bedrock-dynamodb-import-role" +} + +resource "aws_lambda_function" "csv_import" { + filename = "${data.archive_file.import.output_path}" + function_name = "${var.function_name}" + handler = "DynamoDBImportCsv.lambda_handler" + role = "${data.aws_iam_role.import.arn}" + runtime = "python3.6" + source_code_hash = "${data.archive_file.import.output_base64sha256}" + timeout = "${var.import_timeout}" + environment { + variables { + DataTypes = "${jsonencode(var.data_types)}" + TableName = "${var.table_name}" + ItemTemplate = "${jsonencode(var.item_template)}" + AutoGenerateKey = "${var.auto_generate_key}" + } + } +} + +resource "aws_cloudwatch_log_group" "csv_import" { + name = "/aws/lambda/${aws_lambda_function.csv_import.function_name}" + retention_in_days = 30 +} diff --git a/aws-dynamodb/import/vars.tf b/aws-dynamodb/import/vars.tf new file mode 100644 index 00000000..410b6484 --- /dev/null +++ b/aws-dynamodb/import/vars.tf @@ -0,0 +1,34 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} + +variable "function_name" { + description = "A unique name used to reference the function" +} + +variable "data_types" { + description = "A map of column names with applicable DynamoDB data type" + type = "map" + default = {} +} + +variable "table_name" { + description = "The name of the DynamoDB table to import into" +} + +variable "item_template" { + description = "A template map used to initialise each item" + type = "map" + default = {} +} + +variable "auto_generate_key" { + description = "Indicates whether the import function should automatically generate a unique partition key" + default = true +} + +variable "import_timeout" { + description = "The maximum time (seconds) to allow the import job to 
execute" + default = "30" +} diff --git a/blueprints/aws/dynamodb/put/provider.tf b/aws-dynamodb/provider.tf similarity index 100% rename from blueprints/aws/dynamodb/put/provider.tf rename to aws-dynamodb/provider.tf diff --git a/aws-dynamodb/put/Dockerfile b/aws-dynamodb/put/Dockerfile new file mode 100644 index 00000000..b1b70391 --- /dev/null +++ b/aws-dynamodb/put/Dockerfile @@ -0,0 +1,6 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ +ADD lambda /bootstrap/lambda + +ENV TF_VAR_lambda_path="/bootstrap/lambda" diff --git a/aws-dynamodb/put/README.md b/aws-dynamodb/put/README.md new file mode 100644 index 00000000..dae503c5 --- /dev/null +++ b/aws-dynamodb/put/README.md @@ -0,0 +1,16 @@ +# DynamoDB Put Item + +Support adding an item to a DynamoDB table from a JSON payload. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| data\_types | A map of column names with applicable DynamoDB data type | map | `` | no | +| function\_name | A unique name used to reference the function | string | - | yes | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| put\_item\_timeout | The maximum time (seconds) to allow the put job to execute | string | `30` | no | +| region | AWS default region | string | - | yes | +| table\_name | The name of the DynamoDB table to put items into | string | - | yes | + diff --git a/aws-dynamodb/put/lambda/DynamoDBPutItem.py b/aws-dynamodb/put/lambda/DynamoDBPutItem.py new file mode 100644 index 00000000..083958f9 --- /dev/null +++ b/aws-dynamodb/put/lambda/DynamoDBPutItem.py @@ -0,0 +1,35 @@ +""" +DynamoDB Put Item. + +An AWS Lambda function used to add items to a DynamoDB table. 
+""" +import json +import os + +import boto3 +from botocore.exceptions import ClientError + +data_types = os.environ['DataTypes'] +table_name = os.environ['TableName'] + + +def lambda_handler(event, context): + item = json.loads(event['Item']) + + put_item(dict2payload(item)) + + +def dict2payload(item): + retval = {} + for key, value in item.items(): + data_type = data_types[key] if key in data_types else 'S' + retval[key] = {data_type: value} + return retval + + +def put_item(payload): + dynamodb = boto3.client('dynamodb') + try: + dynamodb.put_item(TableName=table_name, Item=payload) + except ClientError as e: + print(e) diff --git a/aws-dynamodb/put/main.tf b/aws-dynamodb/put/main.tf new file mode 100644 index 00000000..9c90df18 --- /dev/null +++ b/aws-dynamodb/put/main.tf @@ -0,0 +1,37 @@ +/** + * # DynamoDB Put Item + * + * Support adding an item to a DynamoDB table from a JSON payload. + */ +data "aws_caller_identity" "current" {} + +data "archive_file" "put" { + output_path = "dynamodb_put.zip" + type = "zip" + source_dir = var.lambda_path +} + +data "aws_iam_role" "put" { + name = "bedrock-dynamodb-put-role" +} + +resource "aws_lambda_function" "put_item" { + filename = data.archive_file.put.output_path + function_name = var.function_name + handler = "DynamoDBPutItem.lambda_handler" + role = data.aws_iam_role.put.arn + runtime = "python3.6" + source_code_hash = data.archive_file.put.output_base64sha256 + timeout = var.put_item_timeout + environment { + variables { + DataTypes = jsonencode(var.data_types) + TableName = var.table_name + } + } +} + +resource "aws_cloudwatch_log_group" "put_item" { + name = "/aws/lambda/${aws_lambda_function.put_item.function_name}" + retention_in_days = 30 +} diff --git a/aws-dynamodb/put/vars.tf b/aws-dynamodb/put/vars.tf new file mode 100644 index 00000000..418c31da --- /dev/null +++ b/aws-dynamodb/put/vars.tf @@ -0,0 +1,23 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = 
"lambda" +} + +variable "function_name" { + description = "A unique name used to reference the function" +} + +variable "data_types" { + description = "A map of column names with applicable DynamoDB data type" + type = "map" + default = {} +} + +variable "table_name" { + description = "The name of the DynamoDB table to put items into" +} + +variable "put_item_timeout" { + description = "The maximum time (seconds) to allow the put job to execute" + default = "30" +} diff --git a/aws-dynamodb/table/README.md b/aws-dynamodb/table/README.md new file mode 100644 index 00000000..a329d3ca --- /dev/null +++ b/aws-dynamodb/table/README.md @@ -0,0 +1,3 @@ +# DynamoDB Table + +Provision a NoSQL table for single table schemas. diff --git a/aws-dynamodb/table/main.tf b/aws-dynamodb/table/main.tf new file mode 100644 index 00000000..148a6849 --- /dev/null +++ b/aws-dynamodb/table/main.tf @@ -0,0 +1,5 @@ +module "table" { + source = "figurate/dynamodb-table/aws//modules/single-table" + + name = var.name +} diff --git a/aws-dynamodb/table/vars.tf b/aws-dynamodb/table/vars.tf new file mode 100644 index 00000000..b7991b43 --- /dev/null +++ b/aws-dynamodb/table/vars.tf @@ -0,0 +1,3 @@ +variable "name" { + description = "Name of the table" +} diff --git a/aws-ec2/apachesling/README.md b/aws-ec2/apachesling/README.md new file mode 100644 index 00000000..022ddf11 --- /dev/null +++ b/aws-ec2/apachesling/README.md @@ -0,0 +1,22 @@ +# AWS Sling host configuration + +Provision an EC2 instance with Apache Sling installed. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| environment | The name of the environment associated with the host | string | - | yes | +| hosted\_zone | Hosted zone identifier for DNS entry | string | - | yes | +| image\_name | AWS image for Sling instance | string | `amzn2-ami-hvm-*` | no | +| image\_os | The operating system installed on the selected AMI. Valid values are:

* al2 = Amazon Linux 2 * ubuntu = Ubuntu | string | `al2` | no | +| image\_owner | AMI image owner (leave blank for current account) | string | `137112412989` | no | +| instance\_type | AWS instance type for Sling | string | `t3.micro` | no | +| region | AWS default region | string | - | yes | +| sling\_user | Username for Sling SSH user | string | - | yes | +| sling\_version | The major release version of Apache Sling to use | string | `10` | no | +| ssh\_key | Location of public key file for SSH access to host | string | `~/.ssh/id_rsa.pub` | no | +| userdata\_path | The root path to userdata templates | string | `userdata` | no | + diff --git a/aws-ec2/apachesling/export.sh b/aws-ec2/apachesling/export.sh new file mode 100755 index 00000000..cc8edb4e --- /dev/null +++ b/aws-ec2/apachesling/export.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ +cp -R /bootstrap/templates ./ diff --git a/aws-ec2/apachesling/outputs.tf b/aws-ec2/apachesling/outputs.tf new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/aws-ec2/apachesling/outputs.tf @@ -0,0 +1 @@ + diff --git a/aws-ec2/apachesling/provider.tf b/aws-ec2/apachesling/provider.tf new file mode 100644 index 00000000..9864333b --- /dev/null +++ b/aws-ec2/apachesling/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-apachesling-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/aws-ec2/apachesling/templates/al2.yml b/aws-ec2/apachesling/templates/al2.yml new file mode 100644 index 00000000..ea4b47f9 --- /dev/null +++ b/aws-ec2/apachesling/templates/al2.yml @@ -0,0 +1,123 @@ +#cloud-config +packages: +# - ntpdate + - java + - nfs-utils + - 
chrony + +timezone: Australia/Melbourne + +#ntp: +# enabled: true +# servers: +# - 0.au.pool.ntp.org +# - 1.au.pool.ntp.org +# - 2.au.pool.ntp.org +# - 3.au.pool.ntp.org + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + - name: apachesling + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + +write_files: + - content: | + [Unit] + Description=Apache Sling + + [Service] + User=apachesling + WorkingDirectory=/opt/sling + ExecStart=/usr/bin/java $JAVA_OPTS -jar org.apache.sling.starter.jar $SLING_OPTS + SuccessExitStatus=143 + TimeoutStopSec=10 + Restart=on-failure + RestartSec=5 + + [Install] + WantedBy=multi-user.target + path: /etc/systemd/system/apachesling.service + - content: | + { + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/opt/sling/sling/logs/access.log", + "log_group_name": "${SlingHostname}-access-log", + "log_stream_name": "{instance_id}" + }, + { + "file_path": "/opt/sling/sling/logs/request.log", + "log_group_name": "${SlingHostname}-request-log", + "log_stream_name": "{instance_id}" + }, + { + "file_path": "/opt/sling/sling/logs/error.log", + "log_group_name": "${SlingHostname}-error-log", + "log_stream_name": "{instance_id}" + } + ] + } + } + }, + "metrics": { + "append_dimensions": { + "AutoScalingGroupName": "$${aws:AutoScalingGroupName}", + "ImageId": "$${aws:ImageId}", + "InstanceId": "$${aws:InstanceId}", + "InstanceType": "$${aws:InstanceType}" + }, + "metrics_collected": { + "mem": { + "measurement": [ + "mem_used_percent" + ], + "metrics_collection_interval": 60 + }, + "swap": { + "measurement": [ + "swap_used_percent" + ], + "metrics_collection_interval": 60 + } + } + } + } + path: /opt/aws/amazon-cloudwatch-agent/etc/config.json + +mounts: + - [ swap, none, swap, sw, 0, 0] + +swap: + filename: /var/cache/swap/swapfile + size: auto + maxsize: 2147483648 + +runcmd: + # 
Configure SSH + - "printf '\nClientAliveInterval 100\nClientAliveCountMax 0' >> /etc/ssh/sshd_config" + - service sshd restart + - systemctl start chronyd && systemctl enable chronyd + # Cloudwatch Agent install + - curl -O https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm + - rpm -U amazon-cloudwatch-agent.rpm + - /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/config.json -s + # Apache Sling install + - mkdir -p /opt/sling + - wget https://repo1.maven.org/maven2/org/apache/sling/org.apache.sling.starter/${SlingVersion}/org.apache.sling.starter-${SlingVersion}.jar -O /opt/sling/org.apache.sling.starter.jar +# - mkdir /opt/sling/sling/startup/20/ +# - wget https://repo1.maven.org/maven2/org/mnode/whistlepost/whistlepost-app/1.1.4/whistlepost-app-1.1.4.jar -O /opt/sling/sling/startup/20/ +# - wget https://repo1.maven.org/maven2/org/mnode/whistlepost/whistlepost-config/1.1.4/whistlepost-config-1.1.4.jar -O /opt/sling/sling/startup/20/ +# - wget https://repo1.maven.org/maven2/org/mnode/whistlepost/whistlepost-content/1.1.4/whistlepost-content-1.1.4.jar -O /opt/sling/sling/startup/20/ + - chown -R apachesling:apachesling /opt/sling + - systemctl enable apachesling.service + - systemctl start apachesling.service diff --git a/aws-ec2/apachesling/templates/ubuntu.yml b/aws-ec2/apachesling/templates/ubuntu.yml new file mode 100644 index 00000000..9984c0fb --- /dev/null +++ b/aws-ec2/apachesling/templates/ubuntu.yml @@ -0,0 +1,29 @@ +#cloud-config +packages: + - fail2ban + - unattended-upgrades + - ntpdate + +timezone: Australia/Melbourne + +ntp: + enabled: true + servers: + - 0.au.pool.ntp.org + - 1.au.pool.ntp.org + - 2.au.pool.ntp.org + - 3.au.pool.ntp.org + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + +runcmd: + - 
"printf '\nClientAliveInterval 100\nClientAliveCountMax 0' >> /etc/ssh/sshd_config" + - service ssh restart + - curl -O https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb + - dpkg -i -E ./amazon-cloudwatch-agent.deb diff --git a/aws-ec2/apachesolr/export.sh b/aws-ec2/apachesolr/export.sh new file mode 100755 index 00000000..cc8edb4e --- /dev/null +++ b/aws-ec2/apachesolr/export.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ +cp -R /bootstrap/templates ./ diff --git a/aws-ec2/apachesolr/templates/al2.yml b/aws-ec2/apachesolr/templates/al2.yml new file mode 100644 index 00000000..d6dd1745 --- /dev/null +++ b/aws-ec2/apachesolr/templates/al2.yml @@ -0,0 +1,83 @@ +#cloud-config +packages: + - java + - nfs-utils + - chrony + +timezone: Australia/Melbourne + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + - name: apachesolr + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + +write_files: + - content: | + { + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/var/solr/logs/solr-*-console.log", + "log_group_name": "${SolrHostname}-console-log", + "log_stream_name": "{instance_id}" + } + ] + } + } + }, + "metrics": { + "append_dimensions": { + "AutoScalingGroupName": "$${aws:AutoScalingGroupName}", + "ImageId": "$${aws:ImageId}", + "InstanceId": "$${aws:InstanceId}", + "InstanceType": "$${aws:InstanceType}" + }, + "metrics_collected": { + "mem": { + "measurement": [ + "mem_used_percent" + ], + "metrics_collection_interval": 60 + }, + "swap": { + "measurement": [ + "swap_used_percent" + ], + "metrics_collection_interval": 60 + } + } + } + } + path: 
/opt/aws/amazon-cloudwatch-agent/etc/config.json + +mounts: + - [ swap, none, swap, sw, 0, 0] + +swap: + filename: /var/cache/swap/swapfile + size: auto + maxsize: 2147483648 + +runcmd: + # Configure SSH + - "printf '\nClientAliveInterval 100\nClientAliveCountMax 0' >> /etc/ssh/sshd_config" + - service sshd restart + - systemctl start chronyd && systemctl enable chronyd + # Cloudwatch Agent install + - curl -O https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm + - rpm -U amazon-cloudwatch-agent.rpm + - /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/config.json -s + # Apache Solr install + - mkdir -p /tmp/install + - wget https://solr/org.apache.solr.starter/${SolrVersion}/ -O /tmp/install + - tar zxvf /tmp/install/solr-${SolrVersion}.tgz solr-${SolrVersion}/bin/install_solr_service.sh --strip-components=2 + - bash /tmp/install/install_solr_service.sh /tmp/install/solr-${SolrVersion}.tgz -u apachesolr \ No newline at end of file diff --git a/blueprints/network/private/aws/Dockerfile b/aws-ec2/autoscaling/Dockerfile similarity index 100% rename from blueprints/network/private/aws/Dockerfile rename to aws-ec2/autoscaling/Dockerfile diff --git a/aws-ec2/autoscaling/README.md b/aws-ec2/autoscaling/README.md new file mode 100644 index 00000000..a6377087 --- /dev/null +++ b/aws-ec2/autoscaling/README.md @@ -0,0 +1,21 @@ +# AWS Autoscaling configuration + +Provision an auto scaling EC2 architecture. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| environment | The name of the environment applied to the RDS stack | string | - | yes | +| image\_name | AWS image for autoscaling launch configuration | string | `amzn2-ami-hvm-*` | no | +| image\_owner | AMI image owner (leave blank for current account) | string | `137112412989` | no | +| instance\_type | AWS instance type for launch configuration | string | `t2.micro` | no | +| region | AWS default region | string | - | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ami\_id | - | + diff --git a/aws-ec2/autoscaling/cloudformation/autoscaling.yml b/aws-ec2/autoscaling/cloudformation/autoscaling.yml new file mode 100644 index 00000000..168781e8 --- /dev/null +++ b/aws-ec2/autoscaling/cloudformation/autoscaling.yml @@ -0,0 +1,9 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an autoscaling stack. 
+ +Parameters: + +Resources: + +Outputs: diff --git a/aws-ec2/autoscaling/cloudformation/autoscaling_launch_config.yml b/aws-ec2/autoscaling/cloudformation/autoscaling_launch_config.yml new file mode 100644 index 00000000..79c5b72e --- /dev/null +++ b/aws-ec2/autoscaling/cloudformation/autoscaling_launch_config.yml @@ -0,0 +1,30 @@ +AsgLaunchConfig: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ImageId + InstanceType: !Ref InstanceType + InstanceMonitoring: true + IamInstanceProfile: !Ref AsgInstanceProfile + SecurityGroups: + - !Ref InstanceSG + AssociatePublicIpAddress: false + UserData: + Fn::Base64: !Sub | + #!/bin/bash + echo ECS_CLUSTER=${EcsCluster} >> /etc/ecs/ecs.config + +AsgSpotLaunchConfig: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ImageId + InstanceType: !Ref InstanceType + SpotPrice: !Ref SpotPrice + InstanceMonitoring: true + IamInstanceProfile: !Ref EcsInstanceProfile + SecurityGroups: + - !Ref InstanceSG + AssociatePublicIpAddress: false + UserData: + Fn::Base64: !Sub | + #!/bin/bash + echo ECS_CLUSTER=${EcsCluster} >> /etc/ecs/ecs.config diff --git a/aws-ec2/autoscaling/cloudformation/autoscaling_windows_config.yml b/aws-ec2/autoscaling/cloudformation/autoscaling_windows_config.yml new file mode 100644 index 00000000..2339d1f0 --- /dev/null +++ b/aws-ec2/autoscaling/cloudformation/autoscaling_windows_config.yml @@ -0,0 +1,34 @@ +EcsLaunchConfig: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ImageId + InstanceType: !Ref InstanceType + InstanceMonitoring: true + IamInstanceProfile: !Ref EcsInstanceProfile + SecurityGroups: + - !Ref InstanceSG + AssociatePublicIpAddress: false + UserData: + Fn::Base64: !Sub | + + Import-Module ECSTools + Initialise-ECSAgent -Cluster '${EcsCluster}' -EnableTaskIAMRole + + +EcsSpotLaunchConfig: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ImageId + InstanceType: !Ref InstanceType + SpotPrice: 
!Ref SpotPrice + InstanceMonitoring: true + IamInstanceProfile: !Ref EcsInstanceProfile + SecurityGroups: + - !Ref InstanceSG + AssociatePublicIpAddress: false + UserData: + Fn::Base64: !Sub | + + Import-Module ECSTools + Initialise-ECSAgent -Cluster '${EcsCluster}' -EnableTaskIAMRole + diff --git a/aws-ec2/autoscaling/export.sh b/aws-ec2/autoscaling/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-ec2/autoscaling/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-ec2/autoscaling/main.tf b/aws-ec2/autoscaling/main.tf new file mode 100644 index 00000000..05498130 --- /dev/null +++ b/aws-ec2/autoscaling/main.tf @@ -0,0 +1,31 @@ +/** + * # AWS Autoscaling configuration + * + * Provision an auto scaling EC2 architecture. + */ +data "aws_caller_identity" "current" {} + +data "aws_ami" "autoscaling_image" { + filter { + name = "name" + values = ["${var.image_name}"] + } + + filter { + name = "state" + values = ["available"] + } + + most_recent = true + owners = ["${replace(var.image_owner, "/\\A\\z/", data.aws_caller_identity.current.account_id)}"] +} + +resource "aws_cloudformation_stack" "autoscale" { + name = "${var.environment}-autoscale" + + parameters { + Environment = "${var.environment}" + } + + template_body = "${file(format("%s/autoscaling.yml", var.cloudformation_path))}" +} diff --git a/aws-ec2/autoscaling/outputs.tf b/aws-ec2/autoscaling/outputs.tf new file mode 100644 index 00000000..6a7a0268 --- /dev/null +++ b/aws-ec2/autoscaling/outputs.tf @@ -0,0 +1,3 @@ +output "ami_id" { + value = "${data.aws_ami.autoscaling_image.id}" +} diff --git a/aws-ec2/autoscaling/roles.tf b/aws-ec2/autoscaling/roles.tf new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/aws-ec2/autoscaling/roles.tf @@ -0,0 +1 @@ + diff 
--git a/aws-ec2/autoscaling/vars.tf b/aws-ec2/autoscaling/vars.tf new file mode 100644 index 00000000..aad42e66 --- /dev/null +++ b/aws-ec2/autoscaling/vars.tf @@ -0,0 +1,19 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} + +variable "image_name" { + description = "AWS image for autoscaling launch configuration" + default = "amzn2-ami-hvm-2.0.????????-x86_64-gp2" +} + +variable "image_owner" { + description = "AMI image owner (leave blank for current account)" + default = "137112412989" +} + +variable "instance_type" { + description = "AWS instance type for launch configuration" + default = "t2.micro" +} diff --git a/aws-ec2/bastion/README.md b/aws-ec2/bastion/README.md new file mode 100644 index 00000000..77cd8da3 --- /dev/null +++ b/aws-ec2/bastion/README.md @@ -0,0 +1,27 @@ +# AWS Bastion host configuration + +Provision an EC2 instance with SSH ingress authenticated with the specified public key. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| bastion\_user | Username for bastion SSH user | string | - | yes | +| enabled | Start/stop the bastion host | string | `true` | no | +| image\_name | AWS image for bastion instance | string | `amzn2-ami-hvm-*` | no | +| image\_os | The operating system installed on the selected AMI. Valid values are:

* al2 = Amazon Linux 2 * ubuntu = Ubuntu | string | `al2` | no | +| image\_owner | AMI image owner (leave blank for current account) | string | `137112412989` | no | +| instance\_type | AWS instance type for bastion | string | `t3.nano` | no | +| region | AWS default region | string | - | yes | +| shutdown\_delay | Number of minutes before the host will automatically shutdown | string | `60` | no | +| ssh\_key | Location of public key file for SSH access to droplets | string | `~/.ssh/id_rsa.pub` | no | +| userdata\_path | The root path to userdata templates | string | `userdata` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ami\_id | - | +| instance\_ip | - | + diff --git a/blueprints/rancher/server/do/export.sh b/aws-ec2/bastion/export.sh similarity index 100% rename from blueprints/rancher/server/do/export.sh rename to aws-ec2/bastion/export.sh diff --git a/aws-ec2/bastion/main.tf b/aws-ec2/bastion/main.tf new file mode 100644 index 00000000..0b6dc1b5 --- /dev/null +++ b/aws-ec2/bastion/main.tf @@ -0,0 +1,113 @@ +/** + * # AWS Bastion host configuration + * + * Provision an EC2 instance with SSH ingress authenticated with the specified public key. 
+ */ +data "aws_caller_identity" "current" {} + +data "aws_vpc" "tenant" { + default = var.vpc_default + tags = var.vpc_tags +} + +data "aws_ami" "bastion_image" { + filter { + name = "name" + values = [ + var.image_name] + } + + filter { + name = "state" + values = ["available"] + } + + most_recent = true + owners = [ + replace(var.image_owner, "/\\A\\z/", data.aws_caller_identity.current.account_id)] +} + +data "template_file" "userdata" { + template = file(format("%s/%s.yml", var.template_path, var.image_os)) + + vars = { + AuthorizedUserName = var.ssh_user + + // AuthorizedUserSSHKey = "${replace(var.ssh_key, "/\\A\\z/", file(var.ssh_key_file))}" + AuthorizedUserSSHKey = replace(var.ssh_key, "/\\A\\z/", "") + ShutdownDelay = var.shutdown_delay + } +} + +data "aws_iam_role" "instance" { + name = "bedrock-bastion-instance" +} + +resource "aws_security_group" "bastion" { + name = "bastion-sg" + + ingress { + protocol = "TCP" + from_port = 22 + to_port = 22 + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + protocol = "TCP" + from_port = 80 + to_port = 80 + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + protocol = "TCP" + from_port = 443 + to_port = 443 + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + protocol = "TCP" + from_port = 22 + to_port = 22 + cidr_blocks = [ + data.aws_vpc.tenant.cidr_block] + } + + tags = { + Name = "BastionSG" + } +} + +resource "aws_iam_instance_profile" "bastion" { + name = "bedrock-bastion-instance" + role = data.aws_iam_role.instance.name +} + +resource "aws_instance" "bastion" { + count = replace(replace(var.enabled, "/false/", 0), "/true/", 1) + ami = data.aws_ami.bastion_image.id + instance_type = var.instance_type + security_groups = [ + aws_security_group.bastion.name] + user_data = data.template_file.userdata.rendered + iam_instance_profile = aws_iam_instance_profile.bastion.name + instance_initiated_shutdown_behavior = "terminate" + + tags = { + Name = "bastion" + } +} + +data "aws_route53_zone" "primary" { + name = 
"${local.hosted_zone}." +} + +resource "aws_route53_record" "bastion" { + zone_id = data.aws_route53_zone.primary.zone_id + name = var.bastion_fqdn + type = "CNAME" + ttl = var.record_ttl + records = [aws_instance.bastion[0].public_dns] +} diff --git a/aws-ec2/bastion/outputs.tf b/aws-ec2/bastion/outputs.tf new file mode 100644 index 00000000..1562a73c --- /dev/null +++ b/aws-ec2/bastion/outputs.tf @@ -0,0 +1,7 @@ +output "ami_id" { + value = "${data.aws_ami.bastion_image.id}" +} + +output "instance_ip" { + value = "${aws_instance.bastion.*.public_ip}" +} diff --git a/blueprints/dns/record/aws/provider.tf b/aws-ec2/bastion/provider.tf similarity index 91% rename from blueprints/dns/record/aws/provider.tf rename to aws-ec2/bastion/provider.tf index e643b28d..8f1f1826 100644 --- a/blueprints/dns/record/aws/provider.tf +++ b/aws-ec2/bastion/provider.tf @@ -1,7 +1,7 @@ provider "aws" { version = ">= 2.7.0" assume_role { - role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-route53-admin" + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-bastion-admin" } } diff --git a/blueprints/bastion/openstack/templates/centos7.yml b/aws-ec2/bastion/templates/al2.yml similarity index 63% rename from blueprints/bastion/openstack/templates/centos7.yml rename to aws-ec2/bastion/templates/al2.yml index 91915f25..3beea9b0 100644 --- a/blueprints/bastion/openstack/templates/centos7.yml +++ b/aws-ec2/bastion/templates/al2.yml @@ -1,12 +1,12 @@ #cloud-config -#yum_repos: -# epel-testing: -# baseurl: http://dl.fedoraproject.org/pub/epel/7/$basearch -# enabled: true -# failovermethod: priority -# gpgcheck: true -# gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL -# name: Extra Packages for Enterprise Linux 7 - Testing +yum_repos: + epel-testing: + baseurl: http://dl.fedoraproject.org/pub/epel/7/$basearch + enabled: true + failovermethod: priority + gpgcheck: true + gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL + name: Extra Packages for Enterprise Linux 7 - 
Testing packages: # - fail2ban @@ -31,13 +31,12 @@ users: ssh-authorized-keys: - ${AuthorizedUserSSHKey} -bootcmd: - - shutdown -P +60 System will shutdown in 60 minutes - runcmd: - "printf '\nClientAliveInterval 100\nClientAliveCountMax 0' >> /etc/ssh/sshd_config" - service sshd restart - systemctl start chronyd && systemctl enable chronyd + - curl -O https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm + - rpm -U amazon-cloudwatch-agent.rpm power_state: timeout: 120 diff --git a/blueprints/bastion/openstack/templates/ubuntu.yml b/aws-ec2/bastion/templates/ubuntu.yml similarity index 100% rename from blueprints/bastion/openstack/templates/ubuntu.yml rename to aws-ec2/bastion/templates/ubuntu.yml diff --git a/aws-ec2/cleanup/Dockerfile b/aws-ec2/cleanup/Dockerfile new file mode 100644 index 00000000..b1b70391 --- /dev/null +++ b/aws-ec2/cleanup/Dockerfile @@ -0,0 +1,6 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ +ADD lambda /bootstrap/lambda + +ENV TF_VAR_lambda_path="/bootstrap/lambda" diff --git a/aws-ec2/cleanup/README.md b/aws-ec2/cleanup/README.md new file mode 100644 index 00000000..702716b8 --- /dev/null +++ b/aws-ec2/cleanup/README.md @@ -0,0 +1,12 @@ +# EC2 Image Cleanup + +A Lambda function to support removal of stale EC2 Images (AMIs). 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-ec2/cleanup/lambda/Ec2CleanupAMIs.py b/aws-ec2/cleanup/lambda/Ec2CleanupAMIs.py new file mode 100644 index 00000000..b7b5aece --- /dev/null +++ b/aws-ec2/cleanup/lambda/Ec2CleanupAMIs.py @@ -0,0 +1,69 @@ +import re +from datetime import datetime, timedelta + +import boto3 + + +def lambda_handler(event, context): + ec2_images = get_ec2_images(event) + + protected_ami_ids = get_protected_ami_ids(ec2_images) + + for image in filter(lambda i: i.image_id not in protected_ami_ids, ec2_images): + print(f'Deregistering {image.name} ({image.id})') + if event['DryRun'] != 'true': + image.deregister() + + remove_unattached_snapshots(event, '.*from SourceAmi (ami-.*) from.*') + + +def get_ec2_images(event): + ec2 = boto3.client('ec2') + + if 'ImageNameFilter' in event: + return ec2.images.filter(Owners=["self"], Filters=[ + {"Name": "name", "Values": [event['ImageNameFilter']]} + ]) + else: + return ec2.images.filter(Owners=["self"]) + + +def get_protected_ami_ids(ec2_images): + ec2 = boto3.client('ec2') + + images_in_use = { instance.image_id for instance in ec2.instances.all()} + + young_images = set() + for image in ec2_images: + created_at = datetime.strptime(image.creation_date, "%Y-%m-%dT%H:%M:%S.000Z") + if created_at > datetime.now() - timedelta(90): + young_images.add(image.id) + + latest_dict = dict() + for image in ec2_images: + split = image.name.split('-') + try: + timestamp = int(split[-1]) + except ValueError: + continue + name = '-'.join(split[:-1]) + + if (name not in latest_dict or timestamp > latest_dict[name][0]): + latest_dict[name] = (timestamp, image) + + latest_images = {image.id for (_, image) in 
latest_dict.values()} + + return images_in_use | young_images | latest_images + + +def remove_unattached_snapshots(event, description_filter): + ec2 = boto3.client('ec2') + + all_images = [image.id for image in ec2.images.all()] + for snapshot in ec2.snapshots.filter(OwnerIds=["self"]): + print(f'Checking snapshot {snapshot.id}') + match = re.match(rf"{description_filter}", snapshot.description) + if match and match.groups()[0] not in all_images: + print(f'Deleting snapshot {snapshot.id}') + if event['DryRun'] != 'true': + snapshot.delete() diff --git a/aws-ec2/cleanup/main.tf b/aws-ec2/cleanup/main.tf new file mode 100644 index 00000000..bfdb255c --- /dev/null +++ b/aws-ec2/cleanup/main.tf @@ -0,0 +1,28 @@ +/** + * # EC2 Image Cleanup + * + * A Lambda function to support removal of stale EC2 Images (AMIs). + */ +data "archive_file" "ec2_cleanup" { + output_path = "ec2_cleanup.zip" + type = "zip" + source_dir = "${format("%s", var.lambda_path)}" +} + +data "aws_iam_role" "ec2_cleanup" { + name = "bedrock-ec2-cleanup-role" +} + +resource "aws_lambda_function" "ec2_cleanup" { + function_name = "Ec2CleanupAMIs" + handler = "Ec2CleanupAMIs.lambda_handler" + filename = "${data.archive_file.ec2_cleanup.output_path}" + role = "${data.aws_iam_role.ec2_cleanup.arn}" + runtime = "python3.6" + source_code_hash = "${data.archive_file.ec2_cleanup.output_base64sha256}" +} + +resource "aws_cloudwatch_log_group" "ec2_cleanup" { + name = "/aws/lambda/${aws_lambda_function.ec2_cleanup.function_name}" + retention_in_days = 30 +} diff --git a/aws-ec2/cleanup/vars.tf b/aws-ec2/cleanup/vars.tf new file mode 100644 index 00000000..4b0fb0d1 --- /dev/null +++ b/aws-ec2/cleanup/vars.tf @@ -0,0 +1,4 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} diff --git a/aws-ec2/cloudformation/README.md b/aws-ec2/cloudformation/README.md new file mode 100644 index 00000000..794df991 --- /dev/null +++ b/aws-ec2/cloudformation/README.md @@ 
-0,0 +1,23 @@ +# AWS reverse proxy configuration + +Provision an NGINX reverse proxy for an environment. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| amplify\_key | API key for nginx amplify | string | - | yes | +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| environment | The name of the environment associated with the reverse proxy | string | - | yes | +| image\_name | AWS image for autoscaling launch configuration | string | `amzn2-ami-hvm-*` | no | +| image\_os | The operating system installed on the selected AMI. Valid values are:

* al2 = Amazon Linux 2 * ubuntu = Ubuntu | string | `al2` | no | +| image\_owner | AMI image owner (leave blank for current account) | string | `137112412989` | no | +| instance\_type | AWS instance type for launch configuration | string | `t3.nano` | no | +| private\_zone | Hosted zone identifier for private DNS entry | string | - | yes | +| public\_zone | Hosted zone identifier for public DNS entry | string | - | yes | +| region | AWS default region | string | - | yes | +| reverseproxy\_user | Username for reverseproxy SSH user | string | - | yes | +| ssh\_key | Location of public key file for SSH access to reverseproxy | string | `~/.ssh/id_rsa.pub` | no | +| userdata\_path | The root path to userdata templates | string | `userdata` | no | + diff --git a/aws-ec2/cloudformation/cloudformation/reverseproxy.yml b/aws-ec2/cloudformation/cloudformation/reverseproxy.yml new file mode 100644 index 00000000..52b5767f --- /dev/null +++ b/aws-ec2/cloudformation/cloudformation/reverseproxy.yml @@ -0,0 +1,259 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an NGINX reverse proxy stack. 
+ +Parameters: + VpcId: + Type: AWS::EC2::VPC::Id + Description: VPC used to provision cluster + VpcCidrBlock: + Type: String + Description: VPC CIDR IP range + ImageId: + Type: AWS::EC2::Image::Id + Description: EC2 AMI ID + InstanceType: + Type: String + Description: EC2 instance type + EC2BurstMode: + Type: String + Description: Specifies whether to throttle or charge for CPU credits once allocation is exhausted + AllowedValues: [standard, unlimited] + Default: standard + Environment: + Type: String + Description: The name of the environment to associate with + UserData: + Type: String + Description: Bootstrap configuration to run on EC2 instance + PublicHostedZoneName: + Type: String + Description: Public hosted zone for route53 entry + PrivateHostedZoneName: + Type: String + Description: Private hosted zone for route53 entry + PublicRouteName: + Type: String + Description: Public FQDN for the Reverse Proxy instance + PrivateRouteName: + Type: String + Description: Private FQDN for the Reverse Proxy instance + +Resources: + ReverseProxySG: + Type: AWS::EC2::SecurityGroup + Properties: + GroupName: ReverseProxySG + GroupDescription: Security group for ReverseProxy + VpcId: !Ref VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 443 + ToPort: 443 + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: 80 + ToPort: 80 + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCidrBlock + SecurityGroupEgress: + - IpProtocol: tcp + FromPort: 443 + ToPort: 443 + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: 80 + ToPort: 80 + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: 53 + ToPort: 53 + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: 53 + ToPort: 53 + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: 8080 + ToPort: 8080 + CidrIp: !Ref VpcCidrBlock + Tags: + - Key: Name + Value: ReverseProxySG + + ReverseProxyEC2Role: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - 
Effect: Allow + Action: + - sts:AssumeRole + Principal: + Service: + - ec2.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM + - arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy + + ReverseProxyInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Roles: + - !Ref ReverseProxyEC2Role + + ReverseProxyInstance: + Type: AWS::EC2::Instance + Metadata: + AWS::CloudFormation::Init: + configSets: + default: + - 01_setupCfnHup + - 02_config-amazon-cloudwatch-agent + - 03_restart_amazon-cloudwatch-agent + UpdateEnvironment: + - 02_config-amazon-cloudwatch-agent + - 03_restart_amazon-cloudwatch-agent + 02_config-amazon-cloudwatch-agent: + files: + '/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json': + content: !Sub | + { + "metrics": { + "append_dimensions": { + "AutoScalingGroupName": "${!aws:AutoScalingGroupName}", + "ImageId": "${!aws:ImageId}", + "InstanceId": "${!aws:InstanceId}", + "InstanceType": "${!aws:InstanceType}" + }, + "metrics_collected": { + "mem": { + "measurement": [ + "mem_used_percent" + ] + }, + "swap": { + "measurement": [ + "swap_used_percent" + ] + } + } + } + } + # Invoke amazon-cloudwatch-agent-ctl to restart the AmazonCloudWatchAgent. + 03_restart_amazon-cloudwatch-agent: + commands: + 01_stop_service: + command: /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a stop + 02_start_service: + command: /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json -s + # Cfn-hup setting, it is to monitor the change of metadata. + # When there is change in the contents of json file in the metadata section, cfn-hup will call cfn-init to restart the AmazonCloudWatchAgent. 
+ 01_setupCfnHup: + files: + '/etc/cfn/cfn-hup.conf': + content: !Sub | + [main] + stack=${AWS::StackId} + region=${AWS::Region} + interval=1 + mode: '000400' + owner: root + group: root + '/etc/cfn/hooks.d/amazon-cloudwatch-agent-auto-reloader.conf': + content: !Sub | + [cfn-auto-reloader-hook] + triggers=post.update + path=Resources.EC2Instance.Metadata.AWS::CloudFormation::Init.02_config-amazon-cloudwatch-agent + action=/opt/aws/bin/cfn-init -v --stack ${AWS::StackId} --resource EC2Instance --region ${AWS::Region} --configsets UpdateEnvironment + runas=root + mode: '000400' + owner: root + group: root + "/lib/systemd/system/cfn-hup.service": + content: !Sub | + [Unit] + Description=cfn-hup daemon + [Service] + Type=simple + ExecStart=/opt/aws/bin/cfn-hup + Restart=always + [Install] + WantedBy=multi-user.target + commands: + 01enable_cfn_hup: + command: !Sub | + systemctl enable cfn-hup.service + 02start_cfn_hup: + command: !Sub | + systemctl start cfn-hup.service + Properties: + IamInstanceProfile: !Ref ReverseProxyInstanceProfile + ImageId: !Ref ImageId + InstanceType: !Ref InstanceType + CreditSpecification: + CPUCredits: !Ref EC2BurstMode +# KeyName: !Ref KeyPair + Monitoring: true + SecurityGroupIds: + - !Ref ReverseProxySG +# SsmAssociations: +# - SSMAssociation +# SubnetId: !Ref SubnetId + Tags: + - Key: Name + Value: !Sub ${Environment}-reverseproxy + - Key: Environment + Value: !Ref Environment + UserData: !Ref UserData + + ReverseProxyEIP: + Type: AWS::EC2::EIP + Properties: + InstanceId: !Ref ReverseProxyInstance + + ReverseProxyPublicRecordSet: + Type: AWS::Route53::RecordSet + Properties: + HostedZoneName: !Ref PublicHostedZoneName + Name: !Ref PublicRouteName + Type: A + TTL: '900' + ResourceRecords: + - !Ref ReverseProxyEIP + + ReverseProxyPrivateRecordSet: + Type: AWS::Route53::RecordSet + Properties: + HostedZoneName: !Ref PrivateHostedZoneName + Name: !Ref PrivateRouteName + Type: A + TTL: '900' + ResourceRecords: + - !GetAtt 
ReverseProxyInstance.PrivateIp + + ReverseProxyHealthCheck: + Type: AWS::Route53::HealthCheck + Properties: + HealthCheckConfig: + FullyQualifiedDomainName: !Ref ReverseProxyPublicRecordSet + Type: HTTP + + ReverseProxyLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub ${Environment}-reverseproxy + RetentionInDays: 7 + +# ReverseProxyDashboard: +# Type: AWS::CloudWatch::Dashboard +# Properties: +# DashboardName: ReverseProxy +# DashboardBody: !Sub | +# {} +# +#Outputs: diff --git a/aws-ec2/cloudformation/export.sh b/aws-ec2/cloudformation/export.sh new file mode 100755 index 00000000..cc8edb4e --- /dev/null +++ b/aws-ec2/cloudformation/export.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ +cp -R /bootstrap/templates ./ diff --git a/aws-ec2/cloudformation/main.tf b/aws-ec2/cloudformation/main.tf new file mode 100644 index 00000000..8f8463c5 --- /dev/null +++ b/aws-ec2/cloudformation/main.tf @@ -0,0 +1,67 @@ +/** + * # AWS reverse proxy configuration + * + * Provision an NGINX reverse proxy for an environment. 
+ */ +data "aws_caller_identity" "current" {} + +data "aws_vpc" "tenant" { + default = var.vpc_default + tags = var.vpc_tags +} + +//data "aws_subnet_ids" "default" { +// vpc_id = "${data.aws_vpc.default.id}" +// tags { +// Name = "${var.subnet}" +// } +//} + +data "aws_ami" "autoscaling_image" { + filter { + name = "name" + values = [var.image_name] + } + filter { + name = "state" + values = ["available"] + } + most_recent = true + owners = [replace(var.image_owner, "/\\A\\z/", data.aws_caller_identity.current.account_id)] +} + +data "aws_iam_role" "nginx_cloudformation" { + name = "bedrock-nginx-cloudformation" +} + +data "template_file" "userdata" { + template = file(format("%s/%s.yml", var.template_path, var.image_os)) + vars = { + NginxAmplifyKey = var.amplify_key + NginxHostname = "${var.environment}-reverseproxy" + AuthorizedUserName = var.ssh_user + // AuthorizedUserSSHKey = "${replace(var.ssh_key, "/\\A\\z/", file(var.ssh_key_file))}" + AuthorizedUserSSHKey = replace(var.ssh_key, "/\\A\\z/", "") + } +} + +resource "aws_cloudformation_stack" "reverseproxy" { + name = "${var.environment}-reverseproxy" + capabilities = ["CAPABILITY_IAM"] + iam_role_arn = data.aws_iam_role.nginx_cloudformation.arn + parameters { + Environment = var.environment + // KeyPair = "" + VpcId = data.aws_vpc.tenant.id + VpcCidrBlock = data.aws_vpc.tenant.cidr_block + // SubnetId = "${data.aws_subnet_ids.default.ids[0]}" + ImageId = data.aws_ami.autoscaling_image.image_id + InstanceType = var.instance_type + UserData = base64encode(data.template_file.userdata.rendered) + PublicHostedZoneName = "${var.public_zone}." + PrivateHostedZoneName = "${var.private_zone}." 
+ PublicRouteName = "${var.environment}-reverseproxy.${var.public_zone}" + PrivateRouteName = "${var.environment}-reverseproxy.${var.private_zone}" + } + template_body = file(format("%s/reverseproxy.yml", var.cloudformation_path)) +} diff --git a/aws-ec2/cloudformation/templates/al2.yml b/aws-ec2/cloudformation/templates/al2.yml new file mode 100644 index 00000000..58cb6dca --- /dev/null +++ b/aws-ec2/cloudformation/templates/al2.yml @@ -0,0 +1,106 @@ +#cloud-config +packages: + - curl + - python + - chrony + +timezone: Australia/Melbourne + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + +write_files: + - content: | + server { + listen 127.0.0.1:80; + server_name 127.0.0.1; + location /nginx_status { + stub_status on; + allow 127.0.0.1; + deny all; + } + } + path: /etc/nginx/conf.d/stub_status.conf + - content: | + map $http_user_agent $log_ua { + ~Amazon-Route53-Health-Check-Service 0; + ~Googlebot 0; + ~Baiduspider 0; + + default 1; + } + + access_log /var/log/nginx/access.log main if=$log_ua; + path: /etc/nginx/conf.d/health_check.conf + - content: | + { + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/var/log/nginx/access.log", + "log_group_name": "${NginxHostname}-access-log", + "log_stream_name": "{instance_id}" + }, + { + "file_path": "/var/log/nginx/error.log", + "log_group_name": "${NginxHostname}-error-log", + "log_stream_name": "{instance_id}" + } + ] + } + } + }, + "metrics": { + "append_dimensions": { + "AutoScalingGroupName": "$${aws:AutoScalingGroupName}", + "ImageId": "$${aws:ImageId}", + "InstanceId": "$${aws:InstanceId}", + "InstanceType": "$${aws:InstanceType}" + }, + "metrics_collected": { + "mem": { + "measurement": [ + "mem_used_percent" + ], + "metrics_collection_interval": 60 + }, + "swap": { + "measurement": [ + "swap_used_percent" + ], + "metrics_collection_interval": 60 + } + } + } + } + 
path: /opt/aws/amazon-cloudwatch-agent/etc/config.json + +mounts: + - [ swap, none, swap, sw, 0, 0] + +swap: + filename: /var/cache/swap/swapfile + size: auto + maxsize: 2147483648 + +runcmd: + - "printf '\nClientAliveInterval 100\nClientAliveCountMax 0' >> /etc/ssh/sshd_config" + - service sshd restart + - systemctl start chronyd && systemctl enable chronyd + # Cloudwatch Agent install + - curl -O https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/amd64/latest/amazon-cloudwatch-agent.rpm + - rpm -U amazon-cloudwatch-agent.rpm + - /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/config.json -s + # NGINX install + - amazon-linux-extras install nginx1.12 epel + - systemctl start nginx && systemctl enable nginx + - curl -sS -L -O https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh + - API_KEY="${NginxAmplifyKey}" AMPLIFY_HOSTNAME="${NginxHostname}" sh ./install.sh -y + - yum install -y python2-certbot-nginx diff --git a/aws-ec2/cloudformation/templates/ubuntu.yml b/aws-ec2/cloudformation/templates/ubuntu.yml new file mode 100644 index 00000000..89fd2a5e --- /dev/null +++ b/aws-ec2/cloudformation/templates/ubuntu.yml @@ -0,0 +1,42 @@ +#cloud-config +apt: + sources: + certbot: + source: ppa:certbot/certbot + +packages: + - nginx + - unattended-upgrades + - curl + - ntpdate + - python + - python-certbot-nginx + +timezone: Australia/Melbourne + +ntp: + enabled: true + servers: + - 0.au.pool.ntp.org + - 1.au.pool.ntp.org + - 2.au.pool.ntp.org + - 3.au.pool.ntp.org + +write_files: + - content: | + server { + listen 127.0.0.1:80; + server_name 127.0.0.1; + location /nginx_status { + stub_status on; + allow 127.0.0.1; + deny all; + } + } + path: /etc/nginx/conf.d/stub_status.conf + +runcmd: + - export API_KEY="${NginxAmplifyKey}" + - curl -L https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh | bash + - curl -O 
https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb + - dpkg -i -E ./amazon-cloudwatch-agent.deb diff --git a/aws-ec2/cloudformation/vars.tf b/aws-ec2/cloudformation/vars.tf new file mode 100644 index 00000000..b84e8a09 --- /dev/null +++ b/aws-ec2/cloudformation/vars.tf @@ -0,0 +1,82 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} + +variable "template_path" { + description = "The root path to userdata templates" + default = "templates" +} + +variable "vpc_default" { + description = "Boolean value to indicate whether the matched VPC should be default for the region" + default = "true" +} + +variable "vpc_tags" { + type = "map" + description = "A map of tags to match on the VPC lookup" + default = {} +} + +variable "environment" { + description = "The name of the environment associated with the reverse proxy" +} + +//variable "subnet" { +// description = "The name of the VPC subnet in which to deploy the EC2 instance" +//} + +variable "image_name" { + description = "AWS image for autoscaling launch configuration" + default = "amzn2-ami-hvm-2.0.????????-x86_64-gp2" + // default = "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*" +} + +variable "image_owner" { + description = "AMI image owner (leave blank for current account)" + default = "137112412989" + // Canonical + // default = "679593333241" +} + +variable "image_os" { + description = < /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-ec2/spotfleet/cluster/main.tf b/aws-ec2/spotfleet/cluster/main.tf new file mode 100644 index 00000000..ab91f01f --- /dev/null +++ b/aws-ec2/spotfleet/cluster/main.tf @@ -0,0 +1,27 @@ +/** + * # AWS SpotFleet cluster configuration + * + * Provision a Spot Fleet cluster. 
+ */ +data "aws_caller_identity" "current" {} + +data "aws_ami" "spotfleet_image" { + filter { + name = "name" + values = ["${var.image_name}"] + } + filter { + name = "state" + values = ["available"] + } + most_recent = true + owners = ["${replace(var.image_owner, "/\\A\\z/", data.aws_caller_identity.current.account_id)}"] +} + +resource "aws_cloudformation_stack" "spotfleet" { + name = "${var.environment}-spotfleet" + parameters { + Environment = "${var.environment}" + } + template_body = "${file(format("%s/spotfleet.yml", var.cloudformation_path))}" +} diff --git a/aws-ec2/spotfleet/cluster/provider.tf b/aws-ec2/spotfleet/cluster/provider.tf new file mode 100644 index 00000000..3ef05c9d --- /dev/null +++ b/aws-ec2/spotfleet/cluster/provider.tf @@ -0,0 +1,8 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" +} + +variable "region" { + description = "AWS default region" +} diff --git a/aws-ec2/spotfleet/cluster/vars.tf b/aws-ec2/spotfleet/cluster/vars.tf new file mode 100644 index 00000000..f96085dd --- /dev/null +++ b/aws-ec2/spotfleet/cluster/vars.tf @@ -0,0 +1,19 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} + +variable "image_name" { + description = "AWS image for spotfleet launch specification" + default = "amzn2-ami-hvm-2.0.????????-x86_64-gp2" +} + +variable "image_owner" { + description = "AMI image owner (leave blank for current account)" + default = "137112412989" +} + +variable "instance_type" { + description = "AWS instance type for launch specification" + default = "t2.micro" +} diff --git a/aws-ec2/spotfleet/main.tf b/aws-ec2/spotfleet/main.tf new file mode 100644 index 00000000..c378218f --- /dev/null +++ b/aws-ec2/spotfleet/main.tf @@ -0,0 +1,44 @@ +/** + * # AWS SpotFleet role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create roles that has the following privileges: + * + * * Access for managing Spot Fleet clusters specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +resource "aws_iam_role" "clusteradmin" { + name = "spotfleet-bedrock-clusteradmin" + assume_role_policy = <./README.md + +format: + $(TERRAFORM) fmt -list=true ./ +# +#new: +# $(TERRAFORM) workspace new $(NAME) && $(TERRAFORM) init +# +#refresh: +# $(TERRAFORM) init $(NAME) && $(TERRAFORM) apply +# +#destroy: +# $(TERRAFORM) destroy $(NAME) + +plan: + $(TERRAFORM) plan -input=false + +apply: + $(TERRAFORM) apply -input=false diff --git a/aws-ecr/aws-ecr/tfplan b/aws-ecr/aws-ecr/tfplan new file mode 100644 index 00000000..1e8e780a Binary files /dev/null and b/aws-ecr/aws-ecr/tfplan differ diff --git a/aws-ecr/main.tf b/aws-ecr/main.tf new file mode 100644 index 00000000..5c2ad51d --- /dev/null +++ b/aws-ecr/main.tf @@ -0,0 +1,18 @@ +module "repository" { + source = "figurate/ecr-repository/aws" + + name = var.name + scan_on_push = var.scan_on_push + untagged_image_expiry_days = var.untagged_image_expiry_days +} + +module "import" { + count = var.import_enabled ? 
length(var.source_tags) : 0 + source = "figurate/docker-container/docker//modules/ecr" + + name = "${var.name}_push_${var.source_tags[count.index]}" + command = ["push", var.name, var.source_tags[count.index], var.source_tags[count.index]] + depends_on = [module.repository] + rm = true + aws_region = var.aws_region +} diff --git a/aws-ecr/provider.tf b/aws-ecr/provider.tf new file mode 100644 index 00000000..0b00357a --- /dev/null +++ b/aws-ecr/provider.tf @@ -0,0 +1,5 @@ +provider "aws" {} + +//provider "docker" { +// host = "tcp://127.0.0.1:2375/" +//} diff --git a/aws-ecr/vars.tf b/aws-ecr/vars.tf new file mode 100644 index 00000000..78c575a8 --- /dev/null +++ b/aws-ecr/vars.tf @@ -0,0 +1,29 @@ +variable "name" { + description = "ECR Registry name" +} + +variable "scan_on_push" { + description = "Automatically scan pushed images for vulnerabilities" + default = true +} + +variable "untagged_image_expiry_days" { + description = "Age (in days) of untagged images to be removed automatically" + type = number + default = 7 +} + +variable "source_tags" { + description = "A list of tags for image import from external registry" + type = list(string) + default = ["latest", "alpine"] +} + +variable "aws_region" { + description = "AWS region for running ECR commands" +} + +variable "import_enabled" { + description = "Flag to enable push to ECR" + default = false +} diff --git a/aws-ecr/versions.tf b/aws-ecr/versions.tf new file mode 100644 index 00000000..3e17c30c --- /dev/null +++ b/aws-ecr/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + docker = { + source = "kreuzwerker/docker" + version = "2.10.0" + } + } + required_version = ">= 0.13" +} diff --git a/aws-ecs/.terraform.lock.hcl b/aws-ecs/.terraform.lock.hcl new file mode 100644 index 00000000..56add126 --- /dev/null +++ b/aws-ecs/.terraform.lock.hcl @@ -0,0 +1,41 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "3.46.0" + constraints = ">= 2.7.0" + hashes = [ + "h1:sNDpXH+7Ay7efB1MSdd5Pb5WL/Ww5EHvLFsqCD5ceD0=", + "zh:3ec89dba1d9ed494c5a8069b98d230289c736f5d7abb0d47d6d657d1c9a22a38", + "zh:47dd0ba54897a43aa22a9009d9eddec30d2e656a6153219335af23c5be609e47", + "zh:482164d6d7782d574d6ef3740d02a3b3566c9e3f03021b497675aa4aa6855ef9", + "zh:5b068dd406e0989cb1b1ce390b8dc33eb77997a594b500dea3d39595e67086b3", + "zh:7bb6dbe99cd483db05d28e0e3109dac6be233961f816b1145035f0f49b30bbde", + "zh:7c245831b5e062b0207b988821d6ed674516c78b81afe0fc015a58e40b973d05", + "zh:7f3fb2457ff59c6e3795acd0995cb3ec3b0f22fce5ab8b261e8480bc752787a6", + "zh:8dcbb64802f38dc20fccedaf93dbfbf367859eba81fe7fa4dc734323f287cf4a", + "zh:da6c412927a514e46ff81e4044ce29617b7c11d33db99ff959a761f97ca09fce", + "zh:e670cda0e9ffcd791d94bb1822c26e2a1d26cb0e7a7b655019f4375a14e04e90", + "zh:ebf9c5ef3eceebc1c21bcd31e535e5c323c3bf6ca5918959e297e9a6617d8094", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "2.2.0" + constraints = "~> 2.1" + hashes = [ + "h1:BRvNNW/32RAFXRcEAovtJWVbFt8zesIKkQm2N0GBCn8=", + "zh:0e99d1b3710f30df818cd662e2ea7b2005cec2a0c64cedd31c459b214f45e9b5", + "zh:43a97fd341c66113aeecfa2f976eb8f5e0b83a6a8824828fac2afef2682d9957", + "zh:501aa0ed294c7befb8bf6e00fd9d8cf4055e1135cb8872f83488ebcde9a89985", + "zh:51862aeed0f28092c877a5f64ddac55b8f6c05cf5278fb51afab5d20b3819934", + "zh:52c22bf8621a120080c7c8c11bfab51678ab9dc3e5c88dc89df8b2c0434a8c89", + "zh:658cce07951f8ba8f170b71198c198e526a82ba56cb74d27b24adf2574eb07b2", + "zh:80a7db37b00c5d9054f68dc62283ce11dcc83b714d550fc56e6b8544bc01d1a8", + "zh:935dd4f4995286c871162bb96ebb49c2d80ef09f2225be62a4ef06c0fcbd72d4", + "zh:af89f57dc41c4d09fd9b7d1277e5ad1d4989cd672f3e58e1891d59020015d21a", + "zh:d45870cf06ed9910d5956175c996cc7bb677f3a8edd94c66a48a3fb93c8d2a84", + "zh:de96c5fadb346adb5e39ea8901c93335b0f3408dd8d477a996b4eb845a8d2343", + 
"zh:e0f3c179975b7fa5898ebe9cc70621f9da301a14e5262399b074ccb675308bd3", + ] +} diff --git a/aws-ecs/Makefile b/aws-ecs/Makefile new file mode 100644 index 00000000..7fa2f313 --- /dev/null +++ b/aws-ecs/Makefile @@ -0,0 +1,48 @@ +SHELL:=/bin/bash +AWS_DEFAULT_REGION?=ap-southeast-2 + +TERRAFORM_VERSION=1.0.0 +TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}:/root" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work hashicorp/terraform:$(TERRAFORM_VERSION) + +TERRAFORM_DOCS=docker run --rm -v "${PWD}:/work" tmknom/terraform-docs + +CHECKOV=docker run --rm -v "${PWD}:/work" bridgecrew/checkov + +TFSEC=docker run --rm -v "${PWD}:/work" liamg/tfsec + +DIAGRAMS=docker run --rm -v "${PWD}:/work" figurate/diagrams python + +NAME=$(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS)) + +REGISTRY?=bedrock +TAGS?=latest +BUILD_ARGS?= + +.PHONY: all build tag push + +all: test build + +clean: + docker rmi $(REGISTRY)/ecs-task-definition + +validate: + $(TERRAFORM) init -upgrade && $(TERRAFORM) validate + +test: validate + $(CHECKOV) -d /work + $(TFSEC) /work + +build: + docker build -t $(REGISTRY)/ecs-task-definition ${BUILD_ARGS} --build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) --build-arg HTTP_PROXY=${http_proxy} --network=host . + +tag: build + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker tag $(REGISTRY)/ecs-task-definition $(REGISTRY)/ecs-task-definition:% + +push: tag + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker push $(REGISTRY)/ecs-task-definition:% + +docs: diagram + $(TERRAFORM_DOCS) markdown ./ >./README.md + +format: + $(TERRAFORM) fmt -list=true ./ diff --git a/aws-ecs/README.md b/aws-ecs/README.md new file mode 100644 index 00000000..20720935 --- /dev/null +++ b/aws-ecs/README.md @@ -0,0 +1,4 @@ +# Elastic Container Service (ECS) Blueprints + +This directory contains blueprints associated with maintaining +ECS services. 
diff --git a/aws-ecs/autoscaling/README.md b/aws-ecs/autoscaling/README.md new file mode 100644 index 00000000..bdc88999 --- /dev/null +++ b/aws-ecs/autoscaling/README.md @@ -0,0 +1,15 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| autoscale\_desired | Minimum number of EC2 nodes to attach to the cluster | string | `0` | no | +| autoscale\_max | Maximum number of EC2 nodes to attach to the cluster | string | `0` | no | +| autoscale\_min | Minimum number of EC2 nodes to attach to the cluster | string | `0` | no | +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| cluster\_name | Name of the ECS cluster | string | - | yes | +| image\_name | AWS image for bastion instance | string | `amzn2-ami-ecs-hvm-*` | no | +| image\_owner | AMI image owner (leave blank for current account) | string | `591542846629` | no | +| instance\_type | AWS instance type for ECS nodes | string | `t3.micro` | no | +| region | AWS default region | string | - | yes | +| spot\_price | A non-zero value indicates a maximum spot price | string | `0` | no | + diff --git a/aws-ecs/autoscaling/cloudformation/ecs_autoscaling.yml b/aws-ecs/autoscaling/cloudformation/ecs_autoscaling.yml new file mode 100644 index 00000000..9fa4af2e --- /dev/null +++ b/aws-ecs/autoscaling/cloudformation/ecs_autoscaling.yml @@ -0,0 +1,183 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning EC2 nodes for an ECS cluster. 
+ +Parameters: + VpcId: + Type: AWS::EC2::VPC::Id + Description: VPC used to provision cluster + ClusterName: + Type: String + Description: The ECS cluster name + EcsLoadBalancerSG: + Type: AWS::EC2::SecurityGroup::Id + Description: Security group for cluster load balancer + BastionSG: + Type: AWS::EC2::SecurityGroup::Id + Description: Security group for bastion host + ClusterImageId: + Type: AWS::EC2::Image::Id + Description: Cluster AMI ID + InstanceType: + Type: String + Description: EC2 instance type + SpotPrice: + Type: String + Description: The maximum price for Spot EC2 instances (zero to disable Spot) + AvailabilityZones: + Type: List<AWS::EC2::AvailabilityZone::Name> + Description: Applicable availability zones + AutoScalingSubnets: + Type: List<AWS::EC2::Subnet::Id> + Description: Applicable subnets for the cluster autoscaling group + AutoscaleMin: + Type: Number + Description: Minimum nodes that can be launched in the cluster + AutoscaleMax: + Type: Number + Description: Maximum nodes that can be launched in the cluster + AutoscaleDesired: + Type: Number + Description: Desired number of nodes launched in the cluster + +Conditions: + SpotClusterEnabled: + !Not [!Equals [!Ref SpotPrice, '0']] + +Resources: + EcsClusterEC2SG: + Type: AWS::EC2::SecurityGroup + Properties: + GroupName: !Sub ${ClusterName}-SG + GroupDescription: Security group for ECS cluster + VpcId: !Ref VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 32768 + ToPort: 61000 + SourceSecurityGroupId: !Ref EcsLoadBalancerSG + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + SourceSecurityGroupId: !Ref BastionSG + Tags: + - Key: Name + Value: !Sub ${ClusterName}-SG + + EcsClusterEC2Role: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - sts:AssumeRole + Principal: + Service: + - ec2.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role + Policies: + - PolicyName: !Sub ${ClusterName}-EC2Policy +
PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - ecr:* + + EcsClusterEC2InstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Roles: + - !Ref EcsClusterEC2Role + +# 'Fn::Transform': +# Name: AWS::Include +# Parameters: +# Location: !Sub ${BucketUrl}/${NodeType}_launch_config.yml + + EcsClusterEC2LaunchConfig: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ClusterImageId + InstanceType: !Ref InstanceType + InstanceMonitoring: true + IamInstanceProfile: !Ref EcsClusterEC2InstanceProfile + SecurityGroups: + - !Ref EcsClusterEC2SG + AssociatePublicIpAddress: true + UserData: + Fn::Base64: !Sub | + #!/bin/bash + echo ECS_CLUSTER=${ClusterName} >> /etc/ecs/ecs.config + + EcsClusterSpotLaunchConfig: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ClusterImageId + InstanceType: !Ref InstanceType + SpotPrice: !Ref SpotPrice + InstanceMonitoring: true + IamInstanceProfile: !Ref EcsClusterEC2InstanceProfile + SecurityGroups: + - !Ref EcsClusterEC2SG + AssociatePublicIpAddress: true + UserData: + Fn::Base64: !Sub | + #!/bin/bash + echo ECS_CLUSTER=${ClusterName} >> /etc/ecs/ecs.config + + + EcsClusterEC2Asg: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + LaunchConfigurationName: !If [SpotClusterEnabled, !Ref EcsClusterSpotLaunchConfig, !Ref EcsClusterEC2LaunchConfig] + AvailabilityZones: !Ref AvailabilityZones + VPCZoneIdentifier: !Ref AutoScalingSubnets + MinSize: !Ref AutoscaleMin + MaxSize: !Ref AutoscaleMax + DesiredCapacity: !Ref AutoscaleDesired + TerminationPolicies: ['OldestInstance'] + Tags: + - Key: Name + Value: !Ref ClusterName + PropagateAtLaunch: true + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: !If [SpotClusterEnabled, 0, 1] + MaxBatchSize: 1 + PauseTime: PT10M + WaitOnResourceSignals: true + SuspendProcesses: + - HealthCheck + - AlarmNotifications + - ScheduledActions + + EcsClusterEC2ScheduleStart: + Type:
AWS::AutoScaling::ScheduledAction + Properties: + AutoScalingGroupName: !Ref EcsClusterEC2Asg + MinSize: !Ref AutoscaleMin + MaxSize: !Ref AutoscaleMax + DesiredCapacity: !Ref AutoscaleDesired + Recurrence: !Ref AsgStartSchedule + + EcsClusterEC2ScheduleStop: + Type: AWS::AutoScaling::ScheduledAction + Properties: + AutoScalingGroupName: !Ref EcsClusterEC2Asg + MinSize: 0 + MaxSize: !Ref AutoscaleMax + DesiredCapacity: 0 + Recurrence: !Ref AsgStopSchedule + + EcsClusterEC2Dashboard: + Type: AWS::CloudWatch::Dashboard + Properties: + DashboardName: !Sub ${ClusterName} + DashboardBody: !Sub | + {} + +Outputs: diff --git a/aws-ecs/autoscaling/export.sh b/aws-ecs/autoscaling/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-ecs/autoscaling/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-ecs/autoscaling/templates/ecs_default.config b/aws-ecs/autoscaling/templates/ecs_default.config new file mode 100644 index 00000000..c7bee048 --- /dev/null +++ b/aws-ecs/autoscaling/templates/ecs_default.config @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +echo ECS_CLUSTER=${EcsCluster} >> /etc/ecs/ecs.config diff --git a/aws-ecs/autoscaling/templates/ecs_windows.config b/aws-ecs/autoscaling/templates/ecs_windows.config new file mode 100644 index 00000000..be97c6fb --- /dev/null +++ b/aws-ecs/autoscaling/templates/ecs_windows.config @@ -0,0 +1,4 @@ + +Import-Module ECSTools +Initialise-ECSAgent -Cluster '${EcsCluster}' -EnableTaskIAMRole + diff --git a/aws-ecs/aws-ecs/backend.tf b/aws-ecs/aws-ecs/backend.tf new file mode 100644 index 00000000..e69de29b diff --git a/aws-ecs/aws-ecs/default.tfvars.json b/aws-ecs/aws-ecs/default.tfvars.json new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/aws-ecs/aws-ecs/default.tfvars.json @@ -0,0 +1 @@ 
+{} diff --git a/blueprints/apachesling/server/aws/Dockerfile b/aws-ecs/cluster/Dockerfile similarity index 68% rename from blueprints/apachesling/server/aws/Dockerfile rename to aws-ecs/cluster/Dockerfile index da09159d..6240fff6 100644 --- a/blueprints/apachesling/server/aws/Dockerfile +++ b/aws-ecs/cluster/Dockerfile @@ -4,7 +4,5 @@ ADD export.sh /bootstrap/ ADD *.tf /bootstrap/ ADD cloudformation /bootstrap/cloudformation -ADD templates /bootstrap/templates ENV TF_VAR_cloudformation_path="/bootstrap/cloudformation" -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/aws-ecs/cluster/README.md b/aws-ecs/cluster/README.md new file mode 100644 index 00000000..476086d0 --- /dev/null +++ b/aws-ecs/cluster/README.md @@ -0,0 +1,9 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| cluster\_name | Name of the ECS cluster | string | - | yes | +| hosted\_zone | A Route53 hosted zone to associate the ECS cluster endpoint | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/aws-ecs/cluster/cloudformation/ecs_cluster.yml b/aws-ecs/cluster/cloudformation/ecs_cluster.yml new file mode 100644 index 00000000..2d3c97ca --- /dev/null +++ b/aws-ecs/cluster/cloudformation/ecs_cluster.yml @@ -0,0 +1,96 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an ECS cluster. 
+ +Parameters: + VpcId: + Type: AWS::EC2::VPC::Id + Description: VPC used to provision cluster + VpcCidrIp: + Type: String + Description: VPC CIDR IP range + LoadBalancerName: + Type: String + Description: Name of the cluster load balancer + LoadBalancerSubnets: + Type: List + Description: Applicable subnets for the cluster load balancer + TargetDeregistrationDelay: + Type: String + Description: Delay before target deregistration (seconds) + HealthCheckPath: + Type: String + Description: Path used for health check + Default: '/' + CertificateArn: + Type: String + Description: ARN of SSL certificate + ClusterName: + Type: String + Description: The name of the ECS cluster + RouteName: + Type: String + Description: FQDN for the cluster load balancer + BlueGreenEnabled: + Type: String + Description: Indicates whether to enable load balancer support for blue-green deployments + AllowedValues: [true, false] + Default: false + ServiceMeshEnabled: + Type: String + Description: Indicates whether to create a service mesh for the cluster + AllowedValues: [true, false] + Default: false + ServiceDiscoveryName: + Type: String + Description: Domain name for service discovery namespace + +Conditions: + CreateServiceMesh: + !Equals [true, !Ref ServiceMeshEnabled] + +Resources: + EcsCluster: + Type: AWS::ECS::Cluster + #checkov:skip=CKV_AWS_65:Relaxed requirement for container insights + Properties: + ClusterName: !Ref ClusterName + + ClusterNodeSG: + Type: AWS::EC2::SecurityGroup + Properties: + GroupName: !Sub '${ClusterName}-NodeSG' + GroupDescription: !Sub 'Security Group for ${ClusterName} Cluster Nodes' + VpcId: !Ref VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 8443 + ToPort: 8443 + CidrIp: !Ref VpcCidrIp + Description: Allow connections from within the VPC + - IpProtocol: tcp + FromPort: 8080 + ToPort: 8080 + CidrIp: !Ref VpcCidrIp + Description: Allow connections from within the VPC + Tags: + - Key: Name + Value: !Sub '${ClusterName}-NodeSG' + + ServiceMesh: 
+ Type: "AWS::AppMesh::Mesh" + Condition: CreateServiceMesh + Properties: + MeshName: !Ref ClusterName + Spec: + EgressFilter: + Type: "ALLOW_ALL" + + ServiceDiscoveryNamespace: + Type: AWS::ServiceDiscovery::PrivateDnsNamespace + Condition: CreateServiceMesh + Properties: + Name: !Ref ServiceDiscoveryName + Vpc: !Ref VpcId + +#Outputs: diff --git a/aws-ecs/cluster/cloudformation/ecs_cluster_lb.yml b/aws-ecs/cluster/cloudformation/ecs_cluster_lb.yml new file mode 100644 index 00000000..c638f7f0 --- /dev/null +++ b/aws-ecs/cluster/cloudformation/ecs_cluster_lb.yml @@ -0,0 +1,208 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an ECS cluster. + +Parameters: + VpcId: + Type: AWS::EC2::VPC::Id + Description: VPC used to provision cluster + VpcCidrIp: + Type: String + Description: VPC CIDR IP range + LoadBalancerName: + Type: String + Description: Name of the cluster load balancer + LoadBalancerSubnets: + Type: List + Description: Applicable subnets for the cluster load balancer + TargetDeregistrationDelay: + Type: String + Description: Delay before target deregistration (seconds) + HealthCheckPath: + Type: String + Description: Path used for health check + Default: '/' + CertificateArn: + Type: String + Description: ARN of SSL certificate + ClusterName: + Type: String + Description: The name of the ECS cluster + HostedZoneId: + Type: AWS::Route53::HostedZone::Id + Description: Hosted zone for route53 entry + RouteName: + Type: String + Description: FQDN for the cluster load balancer + BlueGreenEnabled: + Type: String + Description: Indicates whether to enable load balancer support for blue-green deployments + AllowedValues: [true, false] + Default: false + ServiceDiscoveryEnabled: + Type: String + Description: Indicates whether to create a namespace for service discovery + AllowedValues: [true, false] + Default: false + +Conditions: + CreateTestListener: + !Equals [true, !Ref BlueGreenEnabled] + 
CreateServiceDiscoveryNamespace: + !Equals [true, !Ref ServiceDiscoveryEnabled] + +Resources: + LoadBalancerSG: + Type: AWS::EC2::SecurityGroup + Properties: + GroupName: !Sub ${ClusterName}-alb + GroupDescription: Security group for ECS load balancer + VpcId: !Ref VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 443 + ToPort: 443 + CidrIp: !Ref VpcCidrIp + Description: Allow connections from within the VPC + Tags: + - Key: Name + Value: !Sub ${ClusterName}-alb + + LoadBalancerEgress: + Type: AWS::EC2::SecurityGroupEgress + Properties: + GroupId: !Ref LoadBalancerSG + IpProtocol: -1 + FromPort: -1 + ToPort: -1 + DestinationSecurityGroupId: !Ref ClusterNodeSG + Description: Restrict routing to cluster nodes + + EcsLoadBalancer: + Type: AWS::ElasticLoadBalancingV2::LoadBalancer + Properties: + Name: !Ref LoadBalancerName + Subnets: !Ref LoadBalancerSubnets + Scheme: internal + SecurityGroups: + - !GetAtt LoadBalancerSG.GroupId + + LoadBalancerTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + Properties: + Port: 80 + Protocol: HTTP + VpcId: !Ref VpcId + HealthCheckPath: !Ref HealthCheckPath + HealthCheckIntervalSeconds: 60 + UnhealthyThresholdCount: 10 + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: !Ref TargetDeregistrationDelay + + LoadBalancerListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + LoadBalancerArn: !Ref EcsLoadBalancer + Port: 443 + Protocol: HTTPS + Certificates: + - CertificateArn: !Ref CertificateArn + DefaultActions: + - Type: forward + TargetGroupArn: !Ref LoadBalancerTargetGroup + + LoadBalancerTestTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + Condition: CreateTestListener + Properties: + Port: 80 + Protocol: HTTP + VpcId: !Ref VpcId + HealthCheckPath: !Ref HealthCheckPath + HealthCheckIntervalSeconds: 60 + UnhealthyThresholdCount: 10 + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: !Ref TargetDeregistrationDelay + +
LoadBalancerTestListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Condition: CreateTestListener + Properties: + LoadBalancerArn: !Ref EcsLoadBalancer + Port: 8443 + Protocol: HTTPS + Certificates: + - CertificateArn: !Ref CertificateArn + DefaultActions: + - Type: forward + TargetGroupArn: !Ref LoadBalancerTestTargetGroup + + ClusterRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - sts:AssumeRole + Principal: + Service: + - ecs.amazonaws.com + Policies: + - PolicyName: !Sub ${ClusterName}-Policy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - ec2:AuthorizeSecurityGroupIngress + - ec2:Describe* + - elasticloadbalancing:RegisterInstancesWithLoadBalancer + - elasticloadbalancing:RegisterTargets + - elasticloadbalancing:DeregisterInstancesFromLoadBalancer + - elasticloadbalancing:DeregisterTargets + - elasticloadbalancing:Describe* + + EcsCluster: + Type: AWS::ECS::Cluster + #checkov:skip=CKV_AWS_65:Relaxed requirement for container insights + Properties: + ClusterName: !Ref ClusterName + + ClusterNodeSG: + Type: AWS::EC2::SecurityGroup + Properties: + GroupName: !Sub '${ClusterName}-NodeSG' + GroupDescription: !Sub 'Security Group for ${ClusterName} Cluster Nodes' + VpcId: !Ref VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 32760 + ToPort: 61000 + SourceSecurityGroupId: !Ref LoadBalancerSG + Description: Allow ephemeral ports from load balancer + Tags: + - Key: Name + Value: !Sub '${ClusterName}-NodeSG' + + RecordSet: + Type: AWS::Route53::RecordSet + Properties: + HostedZoneId: !Ref HostedZoneId + Name: !Ref RouteName + Type: A + AliasTarget: + DNSName: !GetAtt EcsLoadBalancer.DNSName + HostedZoneId: !GetAtt EcsLoadBalancer.CanonicalHostedZoneID + + ServiceDiscoveryNamespace: + Type: AWS::ServiceDiscovery::PrivateDnsNamespace + Condition: CreateServiceDiscoveryNamespace + Properties: + Name: !Ref ClusterName + Vpc:
!Ref VpcId + +#Outputs: diff --git a/aws-ecs/cluster/efs.tf b/aws-ecs/cluster/efs.tf new file mode 100644 index 00000000..1acb34c2 --- /dev/null +++ b/aws-ecs/cluster/efs.tf @@ -0,0 +1,29 @@ +resource "aws_efs_file_system" "efs" { + count = var.efs_enabled ? 1 : 0 + creation_token = local.env_string + lifecycle_policy { + transition_to_ia = "AFTER_14_DAYS" + } +} + +resource "aws_security_group" "efs" { + vpc_id = data.aws_vpc.tenant.id + description = format("EFS filesystem access for ECS cluster: %s", local.cluster_name) + ingress { + description = "Allow VPC access" + from_port = 0 + protocol = "-1" + to_port = 0 + cidr_blocks = [data.aws_vpc.tenant.cidr_block] + } + tags = { + Name = format("%s-EFS", local.cluster_name) + } +} + +resource "aws_efs_mount_target" "efs" { + for_each = local.efs_subnets + file_system_id = aws_efs_file_system.efs[0].id + subnet_id = each.value + security_groups = [aws_security_group.efs.id] +} diff --git a/aws-ecs/cluster/export.sh b/aws-ecs/cluster/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-ecs/cluster/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-ecs/cluster/main.tf b/aws-ecs/cluster/main.tf new file mode 100644 index 00000000..8f2c3785 --- /dev/null +++ b/aws-ecs/cluster/main.tf @@ -0,0 +1,42 @@ +/* + * Provision an ECS cluster using CloudFormation stack. 
+ */ +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "cloudformation" { + name = "ecs-cloudformation-role" +} + +data "aws_vpc" "tenant" { + default = var.vpc_default + tags = var.vpc_tags +} + +data "aws_availability_zones" "available_zones" {} + +data "aws_subnet_ids" "tenant" { + vpc_id = data.aws_vpc.tenant.id + // tags { + // Name = "private_subnet_*" + // } +} + +resource "aws_cloudformation_stack" "ecs_cluster" { + name = local.cluster_name + capabilities = ["CAPABILITY_IAM"] + iam_role_arn = data.aws_iam_role.cloudformation.arn + parameters = { + VpcId = data.aws_vpc.tenant.id + VpcCidrIp = data.aws_vpc.tenant.cidr_block + LoadBalancerName = "${local.cluster_name}-alb" + LoadBalancerSubnets = join(",", data.aws_subnet_ids.tenant.ids) + TargetDeregistrationDelay = "" + HealthCheckPath = "/" + CertificateArn = aws_acm_certificate.ssl_certificate.arn + ClusterName = local.cluster_name + ServiceDiscoveryName = local.default_namespace + RouteName = "${local.cluster_name}-${local.account_hash}" + ServiceMeshEnabled = var.servicemesh_enabled ? "true" : "false" + } + template_body = file(format("%s/%s.yml", var.cloudformation_path, var.cluster_template)) +} diff --git a/aws-ecs/cluster/ssl.tf b/aws-ecs/cluster/ssl.tf new file mode 100644 index 00000000..a47bf805 --- /dev/null +++ b/aws-ecs/cluster/ssl.tf @@ -0,0 +1,27 @@ +/* + * Provision a self-signed certificate and import to AWS ACM. 
+ */ +resource "tls_private_key" "ssl_certificate" { + algorithm = "RSA" +} + +resource "tls_self_signed_cert" "ssl_certificate" { + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + ] + key_algorithm = "RSA" + private_key_pem = tls_private_key.ssl_certificate.private_key_pem + validity_period_hours = 2160 // 6 months + subject { + common_name = format("*.%s", local.default_namespace) + organization = "" + country = "" + } +} + +resource "aws_acm_certificate" "ssl_certificate" { + private_key = tls_private_key.ssl_certificate.private_key_pem + certificate_body = tls_self_signed_cert.ssl_certificate.cert_pem +} diff --git a/aws-ecs/cluster/vars.tf b/aws-ecs/cluster/vars.tf new file mode 100644 index 00000000..6532253b --- /dev/null +++ b/aws-ecs/cluster/vars.tf @@ -0,0 +1,62 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} + +variable "cluster_template" { + description = "The CloudFormation template used to create the ECS cluster" + default = "ecs_cluster" +} + +variable "environment" { + description = "The name of the environment associated with the cluster" +} + +variable "context" { + description = "Contextual naming of the ECS cluster (eg. 
user, service, etc.)" + default = "" +} + +variable "vpc_default" { + description = "Indicate whether to deploy in the default VPC" + default = true +} + +variable "vpc_tags" { + type = map(any) + description = "A map of tags to match on the VPC lookup" + default = {} +} + +variable "private_routing" { + description = "Indicates whether private routing (zone) is enabled" + default = "True" + // validation { + // condition = var.private_routing == "true" || var.private_routing == "false" + // error_message = "Must be a boolean value" + // } +} + +variable "servicemesh_enabled" { + description = "Enable service mesh for the cluster" + default = false +} + +variable "service_namespace" { + description = "The namespace to use for service discovery (leave blank for default namespace)" + default = "" +} + +variable "efs_enabled" { + description = "Enable EFS filesystem for persistent volumes" + default = false +} + +locals { + env_string = var.context == "" ? var.environment : format("%s-%s", var.environment, var.context) + cluster_name = var.context == "" ? format("%s-cluster", var.environment) : format("%s-cluster-%s", var.environment, var.context) + account_hash = substr(sha256(data.aws_caller_identity.current.account_id), -10, 10) + default_namespace = replace(var.service_namespace, "/\\A\\z/", "${local.env_string}.internal") + + efs_subnets = var.efs_enabled ? 
data.aws_subnet_ids.tenant.ids : [] +} diff --git a/blueprints/bastion/aws/Dockerfile b/aws-ecs/codedeploy/Dockerfile similarity index 100% rename from blueprints/bastion/aws/Dockerfile rename to aws-ecs/codedeploy/Dockerfile diff --git a/aws-ecs/codedeploy/ecs.tf b/aws-ecs/codedeploy/ecs.tf new file mode 100644 index 00000000..fc6596f6 --- /dev/null +++ b/aws-ecs/codedeploy/ecs.tf @@ -0,0 +1,62 @@ +data aws_ecs_cluster ecs_cluster { + cluster_name = local.cluster_name +} + +data aws_subnet_ids vpc_subnets { + vpc_id = data.aws_vpc.tenant.id +} + +data aws_security_groups vpc_security_groups { + vpc_ids = [data.aws_vpc.tenant.id] +} + +data aws_ecr_repository service_image { + name = var.service_image +} + +data template_file task { + template = file(format("%s/task-%s.json", var.template_path, var.task_type)) + vars = { + ServiceName = var.service_name + ServiceImage = var.service_image + ServiceCpu = var.service_cpu + ServiceMemory = var.service_memory + ServicePort = var.service_port + LogRegion = var.region + LogGroup = aws_cloudwatch_log_group.service_logs.name + LogStreamPrefix = "ecs" + } +} + +resource aws_cloudwatch_log_group service_logs { + name = format("/ecs/%s", local.service_id) + retention_in_days = 7 +} + +resource aws_ecs_task_definition service_task { + family = local.service_id + network_mode = var.network_mode + container_definitions = data.template_file.task.rendered +} + +resource aws_ecs_service service { + name = local.service_id + task_definition = aws_ecs_task_definition.service_task.arn + cluster = data.aws_ecs_cluster.ecs_cluster.id + desired_count = 1 + launch_type = var.launch_type + network_configuration { + subnets = var.network_mode == "awsvpc" ? data.aws_subnet_ids.vpc_subnets.ids : null + security_groups = var.network_mode == "awsvpc" ? 
data.aws_security_groups.vpc_security_groups.ids : null + } + + deployment_controller { + type = "CODE_DEPLOY" + } + + load_balancer { + container_name = var.service_name + container_port = var.service_port + target_group_arn = aws_lb_target_group.ecs_blue.arn + } +} diff --git a/aws-ecs/codedeploy/export.sh b/aws-ecs/codedeploy/export.sh new file mode 100755 index 00000000..4d8769c8 --- /dev/null +++ b/aws-ecs/codedeploy/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/templates ./ diff --git a/aws-ecs/codedeploy/main.tf b/aws-ecs/codedeploy/main.tf new file mode 100644 index 00000000..64d5e0de --- /dev/null +++ b/aws-ecs/codedeploy/main.tf @@ -0,0 +1,122 @@ +data "aws_iam_role" ecs_deploy { + name = "bedrock-ecs-codedeploy" +} + +data aws_vpc tenant { + default = var.vpc_default + tags = var.vpc_tags +} + +data "aws_lb" ecs_cluster { + name = "${local.cluster_name}-alb" +} + +data aws_lb_listener ecs_blue { + load_balancer_arn = data.aws_lb.ecs_cluster.arn + port = 443 +} + +data aws_lb_listener ecs_green { + load_balancer_arn = data.aws_lb.ecs_cluster.arn + port = 8443 +} + +resource aws_lb_target_group ecs_blue { + deregistration_delay = 0 + health_check { + interval = 60 + unhealthy_threshold = 10 + path = "/" + matcher = "200" + } + port = var.service_port + protocol = "HTTP" + vpc_id = data.aws_vpc.tenant.id + name = local.service_id + target_type = var.launch_type == "FARGATE" ? 
"ip" : "instance" +} + +resource "aws_lb_listener_rule" ecs_blue { + listener_arn = data.aws_lb_listener.ecs_blue.arn + action { + type = "forward" + target_group_arn = aws_lb_target_group.ecs_blue.arn + } + condition { + field = "host-header" + values = [] + } +} + +resource aws_lb_target_group ecs_green { + deregistration_delay = 0 + health_check { + interval = 60 + unhealthy_threshold = 10 + path = "/" + matcher = "200" + } + port = var.service_port + protocol = "HTTP" + vpc_id = data.aws_vpc.tenant.id + name = format("green-%s", local.service_id) + target_type = var.launch_type == "FARGATE" ? "ip" : "instance" +} + +resource "aws_lb_listener_rule" ecs_green { + listener_arn = data.aws_lb_listener.ecs_green.arn + action { + type = "forward" + target_group_arn = aws_lb_target_group.ecs_green.arn + } + condition { + field = "host-header" + values = [] + } +} + +resource aws_codedeploy_app ecs_deploy { + name = local.service_id + compute_platform = "ECS" +} + +resource aws_codedeploy_deployment_group service_deploy_group { + app_name = aws_codedeploy_app.ecs_deploy.name + deployment_group_name = local.service_id + deployment_config_name = "CodeDeployDefault.ECSAllAtOnce" + service_role_arn = data.aws_iam_role.ecs_deploy.arn + + blue_green_deployment_config { + deployment_ready_option { + action_on_timeout = "CONTINUE_DEPLOYMENT" + } + terminate_blue_instances_on_deployment_success { + action = "TERMINATE" + termination_wait_time_in_minutes = 5 + } + } + + deployment_style { + deployment_option = "WITH_TRAFFIC_CONTROL" + deployment_type = "BLUE_GREEN" + } + + ecs_service { + cluster_name = local.cluster_name + service_name = aws_ecs_service.service.name + } + + load_balancer_info { + target_group_pair_info { + prod_traffic_route { + listener_arns = [data.aws_lb_listener.ecs_blue.arn] + } + target_group { + name = aws_lb_target_group.ecs_blue.name + } + target_group { + name = aws_lb_target_group.ecs_green.name + } + } + } +} diff --git 
a/aws-ecs/codedeploy/templates/task-default.json b/aws-ecs/codedeploy/templates/task-default.json new file mode 100644 index 00000000..7022393e --- /dev/null +++ b/aws-ecs/codedeploy/templates/task-default.json @@ -0,0 +1,23 @@ +[ + { + "name": "${ServiceName}", + "image": "${ServiceImage}", + "cpu": ${ServiceCpu}, + "memory": ${ServiceMemory}, + "essential": true, + "portMappings": [ + { + "containerPort": ${ServicePort}, + "hostPort": 0 + } + ], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-region": "${LogRegion}", + "awslogs-group": "${LogGroup}", + "awslogs-stream-prefix": "${LogStreamPrefix}" + } + } + } +] diff --git a/aws-ecs/codedeploy/templates/task-fargate.json b/aws-ecs/codedeploy/templates/task-fargate.json new file mode 100644 index 00000000..34e9e326 --- /dev/null +++ b/aws-ecs/codedeploy/templates/task-fargate.json @@ -0,0 +1,21 @@ +[ + { + "name": "${ServiceName}", + "image": "${ServiceImage}", + "essential": true, + "portMappings": [ + { + "containerPort": ${ServicePort}, + "hostPort": ${ServicePort} + } + ], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-region": "${LogRegion}", + "awslogs-group": "${LogGroup}", + "awslogs-stream-prefix": "${LogStreamPrefix}" + } + } + } +] diff --git a/aws-ecs/codedeploy/templates/task-with-config.json b/aws-ecs/codedeploy/templates/task-with-config.json new file mode 100644 index 00000000..0d43a378 --- /dev/null +++ b/aws-ecs/codedeploy/templates/task-with-config.json @@ -0,0 +1,34 @@ +[ + { + "name": "${ServiceName}", + "image": "${ServiceImage}", + "cpu": ${ServiceCpu}, + "memory": ${ServiceMemory}, + "essential": true, + "portMappings": [ + { + "containerPort": ${ServicePort}, + "hostPort": 0 + } + ], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-region": "${LogRegion}", + "awslogs-group": "${LogGroup}", + "awslogs-stream-prefix": "${LogStreamPrefix}" + } + }, + "volumesFrom": [ + { + "sourceContainer":
"${ServiceConfigName}", + "readOnly": true + } + ] + }, + { + "name": "${ServiceConfigName}", + "image": "${ServiceConfigImage}", + "essential": false + } +] diff --git a/aws-ecs/codedeploy/vars.tf b/aws-ecs/codedeploy/vars.tf new file mode 100644 index 00000000..eb00ee98 --- /dev/null +++ b/aws-ecs/codedeploy/vars.tf @@ -0,0 +1,70 @@ +variable template_path { + description = "The root path to container templates" + default = "templates" +} + +variable environment { + description = "Environment categorisation used in the naming of the ECS cluster" +} + +variable "region" { + description = "AWS default region" +} + +variable context { + description = "Contextual categorisation used in the naming of the ECS cluster" + default = "" +} + +variable vpc_default { + description = "Boolean value to indicate whether the matched VPC should be default for the region" + default = "true" +} + +variable vpc_tags { + type = "map" + description = "A map of tags to match on the VPC lookup" + default = {} +} + +variable task_type { + description = "The type of task definition to apply. Valid values include: default, fargate, with-config" + default = "default" +} + +variable service_name { + description = "Name of the ECS service" +} + +variable service_port { + description = "Container port of the ECS service" +} + +variable service_image { + description = "Docker image used for the service" +} + +variable service_cpu { + description = "Minimum CPU units required (1024 = 1 vCPU)" + default = 256 +} + +variable service_memory { + description = "Minimum amount of memory required (in MiB)" + default = 512 +} + +variable network_mode { + description = "Network mode used by service containers" + default = "bridge" +} + +variable launch_type { + description = "Type of deployment infrastructure (EC2 or FARGATE)" + default = "EC2" +} + +locals { + service_id = var.context != "" ? 
format("%s-%s-%s", var.environment, replace(var.service_name, "/", "-"), var.context) : format("%s-%s", var.environment, replace(var.service_name, "/", "-")) + cluster_name = var.context != "" ? format("%s-cluster-%s", var.environment, var.context) : format("%s-cluster", var.environment) +} diff --git a/aws-ecs/provider.tf b/aws-ecs/provider.tf new file mode 100644 index 00000000..1a383be6 --- /dev/null +++ b/aws-ecs/provider.tf @@ -0,0 +1,14 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/ecs-blueprint-role" + } +} + +provider "tls" { + version = "~> 2.1" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/apachesolr/server/aws/Dockerfile b/aws-ecs/service/Dockerfile similarity index 68% rename from blueprints/apachesolr/server/aws/Dockerfile rename to aws-ecs/service/Dockerfile index da09159d..6240fff6 100644 --- a/blueprints/apachesolr/server/aws/Dockerfile +++ b/aws-ecs/service/Dockerfile @@ -4,7 +4,5 @@ ADD export.sh /bootstrap/ ADD *.tf /bootstrap/ ADD cloudformation /bootstrap/cloudformation -ADD templates /bootstrap/templates ENV TF_VAR_cloudformation_path="/bootstrap/cloudformation" -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/aws-ecs/service/README.md b/aws-ecs/service/README.md new file mode 100644 index 00000000..3b03f4bb --- /dev/null +++ b/aws-ecs/service/README.md @@ -0,0 +1,9 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| cluster\_name | Name of the ECS cluster | string | - | yes | +| region | AWS default region | string | - | yes | +| service\_name | Name of the ECS service | string | - | yes | + diff --git a/aws-ecs/service/cloudformation/fargate/service_alb.yml 
b/aws-ecs/service/cloudformation/fargate/service_alb.yml new file mode 100644 index 00000000..6e4a1fe9 --- /dev/null +++ b/aws-ecs/service/cloudformation/fargate/service_alb.yml @@ -0,0 +1,240 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an ECS service. + +Parameters: + ServiceName: + Type: String + Description: The ECS service identifier + ClusterName: + Type: String + Description: The name of the ECS cluster + Environment: + Type: String + Description: The name of the environment represented by the VPC + +Resources: + EcsService: + Type: AWS::ECS::Service + Properties: + ServiceName: !Join ['-', [!Ref Environment, !Ref ServiceName]] + Cluster: !Ref ClusterName + LaunchType: FARGATE + DesiredCount: !Ref AutoScalingDesired + HealthCheckGracePeriodSeconds: !Ref HealthCheckGracePeriod + LoadBalancers: + - ContainerName: !Join ['-', !Split ['/', !Ref ServiceImage]] + ContainerPort: !Ref ContainerPort + TargetGroupArn: !Ref AlbTargetGroup + DeploymentConfiguration: + MaximumPercent: 200 + MinimumHealthyPercent: 50 + TaskDefinition: !Ref TaskDefinition + NetworkConfiguration: + AwsvpcConfiguration: + SecurityGroups: + - !Ref ServiceSecurityGroup + Subnets: !Ref PrivateSubnetIds + + ServiceSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupName: !Join ['-', !Ref Environment, !Ref ServiceName] + GroupDescription: !Join [' ', ['Allow traffic from ALB to', !Ref ServiceName]] + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: !Ref ServicePort + ToPort: !Ref ServicePort + SourceSecurityGroupId: !Ref AlbSg + VpcId: !Ref VpcId + Tags: + - Key: Name + Value: !Sub '${AWS::StackName}-SG' + + ServiceTaskRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + Action: ['sts:AssumeRole'] + Principal: + Service: + - ecs-tasks.amazonaws.com + + ServiceExecutionRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + 
Action: ['sts:AssumeRole'] + Principal: + Service: + - ecs-tasks.amazonaws.com + - ecs.amazonaws.com + - ec2.amazonaws.com + + TaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Join ['-', [!Ref Environment, !Ref ServiceName]] + TaskRoleArn: !Ref ServiceTaskRole + RequiresCompatibilities: + - EC2 + - FARGATE + NetworkMode: awsvpc + ExecutionRole: !Ref ServiceExecutionRole + ContainerDefinitions: + - Name: !Join ['-', !Split ['/', !Ref ServiceImage]] + Cpu: !Ref CpuUnits + Memory: !Ref MemoryUnits + Environment: + - Name: 'HOSTENV' + Value: !Ref Environment + Essential: true + PortMappings: + - HostPort: !Ref ServicePort + Protocol: tcp + ContainerPort: !Ref ServicePort + Image: !Join [':', [!Ref ServiceImage, !Ref ServiceTag]] + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWSRegion + awslogs-group: !Ref TaskLogGroup + awslogs-stream-prefix: ecs + + TaskLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub 'ecs/${Environment}-${ServiceName}' + RetentionInDays: 7 + + AlbTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + Properties: + TargetType: ip + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: !Ref DeregistrationDelay + HealthCheckIntervalSeconds: 60 + UnhealthyThresholdCount: 10 + HealthCheckPath: !Ref HealthCheckPath + Matcher: + HttpCode: !Ref HealthCheckSuccessCodes + Port: !Ref ServicePort + Protocol: HTTP + VpcId: !Ref VpcId + Name: !Sub '${AWS::StackName}-TG' + + + AlbListenerRule: + Type: AWS::ElasticLoadBalancingV2::ListenerRule + Properties: + Actions: + - Type: forward + TargetGroupArn: !Ref AlbTargetGroup + Conditions: + - Field: host-header + Values: [!Ref Hostname] + ListenerArn: !Ref AlbListenerArn + Priority: !Ref AlbListenerRulePriority + + + AutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + Action: ['sts:AssumeRole'] + Principal: + Service: + - 
application-autoscaling.amazonaws.com + + AutoScalingTarget: + Type: AWS::ApplicationAutoScaling::ScalableTarget + DependsOn: EcsService + Properties: + MinCapacity: !Ref AutoScalingMin + MaxCapacity: !Ref AutoScalingMax + ResourceId: !Join ['', ['service/', !Ref ClusterName, '-', !GetAtt [EcsService, Name]]] + RoleARN: !GetAtt [AutoScalingRole, Arn] + ScalableDimension: ecs:service:DesiredCount + ServiceNamespace: ecs + + ScaleOutPolicy: + Type: AWS::ApplicationAutoScaling::ScalingPolicy + Properties: + PolicyName: ServiceScaleOutPolicy + PolicyType: StepScaling + ScalingTargetId: AutoScalingTarget + StepScalingPolicyConfiguration: + AdjustmentType: ChangeInCapacity + Cooldown: 300 + MetricAggregationType: Average + StepAdjustments: + - MetricIntervalLowerBound: 0 + ScalingAdjustment: 1 + + ScaleInPolicy: + Type: AWS::ApplicationAutoScaling::ScalingPolicy + Properties: + PolicyName: ServiceScaleInPolicy + PolicyType: StepScaling + ScalingTargetId: AutoScalingTarget + StepScalingPolicyConfiguration: + AdjustmentType: ChangeInCapacity + Cooldown: 300 + MetricAggregationType: Average + StepAdjustments: + - MetricIntervalLowerBound: 0 + ScalingAdjustment: -1 + + CloudWatchDashboard: + Type: AWS::CloudWatch::Dashboard + Properties: + DashboardName: !GetAtt [EcsService, Name] + DashboardBody: !Sub | + {} + + ServiceCpuHighAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + EvaluationPeriods: 5 + Statistic: Average + Threshold: 40 + AlarmDescription: CPU utilisation is less than 40 percent + Period: 60 + AlarmActions: + - !Ref ScaleInPolicy + Namespace: AWS/ECS + Dimensions: + - Name: ClusterName + Value: !Ref ClusterName + - Name: ServiceName + Value: !GetAtt [EcsService, Name] + ComparisonOperator: LessThanThreshold + MetricName: CPUUtilization + + ServiceCpuLowAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + EvaluationPeriods: 5 + Statistic: Average + Threshold: 60 + AlarmDescription: CPU utilisation is greater than 60 percent + Period: 60 + AlarmActions: + - 
!Ref ScaleOutPolicy + Namespace: AWS/ECS + Dimensions: + - Name: ClusterName + Value: !Ref ClusterName + - Name: ServiceName + Value: !GetAtt [EcsService, Name] + ComparisonOperator: GreaterThanThreshold + MetricName: CPUUtilization + +Outputs: diff --git a/aws-ecs/service/cloudformation/service_instance.yml b/aws-ecs/service/cloudformation/service_instance.yml new file mode 100644 index 00000000..140f5ca7 --- /dev/null +++ b/aws-ecs/service/cloudformation/service_instance.yml @@ -0,0 +1,218 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an ECS service. + +Parameters: + ServiceName: + Type: String + Description: The ECS service identifier + ClusterName: + Type: String + Description: The name of the ECS cluster + Environment: + Type: String + Description: The name of the environment represented by the VPC + NetworkMode: + Description: Network configuration of service tasks + Default: bridge + AllowedValues: [none, bridge, awsvpc, host] + ServiceDiscoveryNamespace: + Type: String + Description: The namespace to register service discovery endpoint + +Resources: + EcsService: + Type: AWS::ECS::Service + Properties: + ServiceName: !Join ['-', [!Ref Environment, !Ref ServiceName]] + Cluster: !Ref ClusterName + DesiredCount: !Ref AutoScalingDesired + HealthCheckGracePeriodSeconds: !Ref HealthCheckGracePeriod + LoadBalancers: + - ContainerName: !Join ['-', !Split ['/', !Ref ServiceImage]] + ContainerPort: !Ref ContainerPort + TargetGroupArn: !Ref AlbTargetGroup + DeploymentConfiguration: + MaximumPercent: 200 + MinimumHealthyPercent: 50 + TaskDefinition: !Ref TaskDefinition + + ServiceTaskRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + Action: ['sts:AssumeRole'] + Principal: + Service: + - ecs-tasks.amazonaws.com + + TaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Join ['-', [!Ref Environment, !Ref ServiceName]] + TaskRoleArn: !Ref 
ServiceTaskRole + NetworkMode: !Ref NetworkMode + ContainerDefinitions: + - Name: !Join ['-', !Split ['/', !Ref ServiceImage]] + Cpu: !Ref CpuUnits + Memory: !Ref MemoryUnits + Environment: + - Name: 'HOSTENV' + Value: !Ref Environment + Essential: true + PortMappings: + - HostPort: 0 + Protocol: tcp + ContainerPort: !Ref ServicePort + Image: !Join [':', [!Ref ServiceImage, !Ref ServiceTag]] + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWSRegion + awslogs-group: !Ref TaskLogGroup + awslogs-stream-prefix: ecs + + TaskLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Join ['', ['/', !Join ['/', ['ecs', !Join ['-', [!Ref Environment, !Ref ServiceName]]]]]] + RetentionInDays: 7 + + AlbTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + Properties: + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: !Ref DeregistrationDelay + HealthCheckIntervalSeconds: 60 + UnhealthyThresholdCount: 10 + HealthCheckPath: !Ref HealthCheckPath + Matcher: + HttpCode: !Ref HealthCheckSuccessCodes + Port: !Ref ServicePort + Protocol: HTTP + VpcId: !Ref VpcId + Name: !Sub '${AWS::StackName}-TG' + + + AlbListenerRule: + Type: AWS::ElasticLoadBalancingV2::ListenerRule + Properties: + Actions: + - Type: forward + TargetGroupArn: !Ref AlbTargetGroup + Conditions: + - Field: host-header + Values: [!Ref Hostname] + ListenerArn: !Ref AlbListenerArn + Priority: !Ref AlbListenerRulePriority + + + AutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Effect: Allow + Action: ['sts:AssumeRole'] + Principal: + Service: + - application-autoscaling.amazonaws.com + + AutoScalingTarget: + Type: AWS::ApplicationAutoScaling::ScalableTarget + DependsOn: EcsService + Properties: + MinCapacity: !Ref AutoScalingMin + MaxCapacity: !Ref AutoScalingMax + ResourceId: !Join ['', ['service/', !Ref ClusterName, '-', !GetAtt [EcsService, Name]]] + RoleARN: !GetAtt [AutoScalingRole, Arn] + 
ScalableDimension: ecs:service:DesiredCount + ServiceNamespace: ecs + + ScaleOutPolicy: + Type: AWS::ApplicationAutoScaling::ScalingPolicy + Properties: + PolicyName: ServiceScaleOutPolicy + PolicyType: StepScaling + ScalingTargetId: !Ref AutoScalingTarget + StepScalingPolicyConfiguration: + AdjustmentType: ChangeInCapacity + Cooldown: 300 + MetricAggregationType: Average + StepAdjustments: + - MetricIntervalLowerBound: 0 + ScalingAdjustment: 1 + + ScaleInPolicy: + Type: AWS::ApplicationAutoScaling::ScalingPolicy + Properties: + PolicyName: ServiceScaleInPolicy + PolicyType: StepScaling + ScalingTargetId: !Ref AutoScalingTarget + StepScalingPolicyConfiguration: + AdjustmentType: ChangeInCapacity + Cooldown: 300 + MetricAggregationType: Average + StepAdjustments: + - MetricIntervalUpperBound: 0 + ScalingAdjustment: -1 + + CloudWatchDashboard: + Type: AWS::CloudWatch::Dashboard + Properties: + DashboardName: !GetAtt [EcsService, Name] + DashboardBody: !Sub | + {} + + ServiceCpuHighAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + EvaluationPeriods: 5 + Statistic: Average + Threshold: 40 + AlarmDescription: CPU utilisation is less than 40 percent + Period: 60 + AlarmActions: + - !Ref ScaleInPolicy + Namespace: AWS/ECS + Dimensions: + - Name: ClusterName + Value: !Ref ClusterName + - Name: ServiceName + Value: !GetAtt [EcsService, Name] + ComparisonOperator: LessThanThreshold + MetricName: CPUUtilization + + ServiceCpuLowAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + EvaluationPeriods: 5 + Statistic: Average + Threshold: 60 + AlarmDescription: CPU utilisation is greater than 60 percent + Period: 60 + AlarmActions: + - !Ref ScaleOutPolicy + Namespace: AWS/ECS + Dimensions: + - Name: ClusterName + Value: !Ref ClusterName + - Name: ServiceName + Value: !GetAtt [EcsService, Name] + ComparisonOperator: GreaterThanThreshold + MetricName: CPUUtilization + + ServiceDiscoveryEndpoint: + Type: AWS::ServiceDiscovery::Service + Properties: + Name: !Ref ServiceName + DnsConfig: + 
DnsRecords: [{Type: A, TTL: "10"}] + NamespaceId: !Ref ServiceDiscoveryNamespace + HealthCheckCustomConfig: + FailureThreshold: 1 + +Outputs: diff --git a/aws-ecs/service/export.sh b/aws-ecs/service/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-ecs/service/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-ecs/service/main.tf b/aws-ecs/service/main.tf new file mode 100644 index 00000000..b6f86598 --- /dev/null +++ b/aws-ecs/service/main.tf @@ -0,0 +1,34 @@ +data "aws_iam_role" "serviceadmin" { + name = "bedrock-ecs-service-admin" +} + +data "aws_alb" "cluster_alb" { + name = "${var.cluster_name}" +} + +data "aws_route53_zone" "internal" { + name = "internal." + private_zone = true +} + +resource "aws_cloudformation_stack" "ecs_service" { + name = "${var.service_name}-ecs-service" + iam_role_arn = "${data.aws_iam_role.serviceadmin.arn}" + parameters { + ClusterName = "" + HostedZoneId = "" + RouteName = "" + } + template_body = "${file(format("%s/ecs_service.yml", var.cloudformation_path))}" +} + +resource "aws_route53_record" "ecs_service" { + name = "${local.env_string}-${var.service_name}" + type = "A" + zone_id = "${data.aws_route53_zone.internal.zone_id}" + alias { + evaluate_target_health = false + name = "${data.aws_alb.cluster_alb.dns_name}" + zone_id = "${data.aws_alb.cluster_alb.zone_id}" + } +} diff --git a/blueprints/aws/rds/snapshot/provider.tf b/aws-ecs/service/provider.tf similarity index 100% rename from blueprints/aws/rds/snapshot/provider.tf rename to aws-ecs/service/provider.tf diff --git a/aws-ecs/service/vars.tf b/aws-ecs/service/vars.tf new file mode 100644 index 00000000..d8b41d28 --- /dev/null +++ b/aws-ecs/service/vars.tf @@ -0,0 +1,16 @@ +variable "cloudformation_path" { + description = "The root 
path to cloudformation templates" + default = "cloudformation" +} + +variable "service_name" { + description = "Name of the ECS service" +} + +variable "cluster_name" { + description = "Name of the ECS cluster" +} + +locals { + env_string = "" +} diff --git a/blueprints/nginx/reverseproxy/aws/Dockerfile b/aws-ecs/spotfleet/Dockerfile similarity index 68% rename from blueprints/nginx/reverseproxy/aws/Dockerfile rename to aws-ecs/spotfleet/Dockerfile index da09159d..6240fff6 100644 --- a/blueprints/nginx/reverseproxy/aws/Dockerfile +++ b/aws-ecs/spotfleet/Dockerfile @@ -4,7 +4,5 @@ ADD export.sh /bootstrap/ ADD *.tf /bootstrap/ ADD cloudformation /bootstrap/cloudformation -ADD templates /bootstrap/templates ENV TF_VAR_cloudformation_path="/bootstrap/cloudformation" -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/aws-ecs/spotfleet/cloudformation/ecs_spotfleet.yml b/aws-ecs/spotfleet/cloudformation/ecs_spotfleet.yml new file mode 100644 index 00000000..bba90d27 --- /dev/null +++ b/aws-ecs/spotfleet/cloudformation/ecs_spotfleet.yml @@ -0,0 +1,99 @@ +AWSTemplateFormatVersion: '2010-09-09' + +Description: Provision SpotFleet for an ECS Cluster + +Parameters: + ClusterName: + Type: String + Description: Name of the ECS Cluster + AmiId: + Type: AWS::EC2::Image::Id + Description: SpotFleet EC2 Image + InstanceType: + Type: String + Description: EC2 Instance Type + AvailabilityZones: + Type: List + Description: AZs used to deploy EC2 instances + ClusterNodeSubnets: + Type: List + Description: VPC Subnets used to provision network interfaces for cluster nodes + ClusterNodesMin: + Type: Number + Description: Minimum number of nodes required for the cluster + ClusterNodesMax: + Type: Number + Description: Maximum number of nodes allowed for the cluster + ClusterNodesDesired: + Type: Number + Description: Desired number of nodes suggested for the cluster + ClusterNodeSG: + Type: AWS::EC2::SecurityGroup::Id + Description: Security Group applied to cluster nodes + 
ClusterNodeInstanceProfile: + Type: String + Description: EC2 Instance Profile attached to cluster nodes + ClusterSpotFleetRole: + Type: String + Description: IAM Role ARN for cluster SpotFleet + ClusterAutoscalingRole: + Type: String + Description: IAM Role ARN for cluster autoscaling + +Resources: + LaunchTemplate: + Type: AWS::EC2::LaunchTemplate + Properties: + LaunchTemplateData: + ImageId: !Ref AmiId + InstanceType: !Ref InstanceType + IamInstanceProfile: + Arn: !Ref ClusterNodeInstanceProfile + SecurityGroupIds: + - !Ref ClusterNodeSG + TagSpecifications: + - ResourceType: instance + Tags: + - Key: Name + Value: !Ref ClusterName + UserData: + Fn::Base64: !Sub | + #!/bin/bash + echo 'ECS_CLUSTER=${ClusterName}' >> /etc/ecs/ecs.config + + SpotFleet: + Type: AWS::EC2::SpotFleet + Properties: + SpotFleetRequestConfigData: + IamFleetRole: !Ref ClusterSpotFleetRole + TargetCapacity: !Ref ClusterNodesDesired + LaunchTemplateConfigs: + - LaunchTemplateSpecification: + LaunchTemplateId: !Ref LaunchTemplate + Version: !GetAtt LaunchTemplate.LatestVersionNumber + Overrides: + - SubnetId: !Join [',', !Ref ClusterNodeSubnets] + +# AutoscalingTarget: +# Type: AWS::ApplicationAutoScaling::ScalableTarget +# Properties: +# +# ScaleOutPolicy: +# Type: AWS::ApplicationAutoScaling::ScalingPolicy +# Properties: +# +# ScaleInPolicy: +# Type: AWS::ApplicationAutoScaling::ScalingPolicy +# Properties: +# +# CloudWatchAlarmCPUHigh: +# Type: AWS::CloudWatch::Alarm +# Properties: +# +# CloudWatchAlarmMemoryHigh: +# Type: AWS::CloudWatch::Alarm +# Properties: +# +# CloudWatchAlarmMetricsLow: +# Type: AWS::CloudWatch::Alarm +# Properties: diff --git a/aws-ecs/spotfleet/export.sh b/aws-ecs/spotfleet/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-ecs/spotfleet/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp 
/bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-ecs/spotfleet/main.tf b/aws-ecs/spotfleet/main.tf new file mode 100644 index 00000000..8bdb5513 --- /dev/null +++ b/aws-ecs/spotfleet/main.tf @@ -0,0 +1,72 @@ +/* + * Provision a Spotfleet for an ECS cluster using CloudFormation stack. + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "cloudformation" { + name = "ecs-cloudformation-role" +} + +data "aws_iam_role" "spotfleet" { + name = "ecs-spotfleet-role" +} + +data "aws_iam_role" "autoscaling" { + name = "ecs-autoscaling-role" +} + +data "aws_iam_instance_profile" "clusternode" { + name = "ecs-clusternode-instanceprofile" +} + +data "aws_vpc" "tenant" { + default = var.vpc_default + tags = var.vpc_tags +} + +data "aws_availability_zones" "available_zones" {} + +data "aws_subnet_ids" "tenant" { + vpc_id = data.aws_vpc.tenant.id + // tags { + // Name = "private_subnet_*" + // } +} + +data "aws_ami" "clusternode" { + filter { + name = "name" + values = [var.image_name] + } + filter { + name = "state" + values = ["available"] + } + most_recent = true + owners = [replace(var.image_owner, "/\\A\\z/", data.aws_caller_identity.current.account_id)] +} + +data "aws_security_group" "clusternode" { + name = "${local.cluster_name}-NodeSG" +} + +resource "aws_cloudformation_stack" "ecs_spotfleet" { + name = "${local.cluster_name}-spotfleet" + capabilities = ["CAPABILITY_IAM"] + iam_role_arn = data.aws_iam_role.cloudformation.arn + parameters = { + ClusterName = local.cluster_name + AmiId = data.aws_ami.clusternode.id + InstanceType = var.instance_type + AvailabilityZones = join(",", data.aws_availability_zones.available_zones.names) + ClusterNodeSubnets = join(",", data.aws_subnet_ids.tenant.ids) + ClusterNodesMin = var.nodes_min + ClusterNodesMax = var.nodes_max + ClusterNodesDesired = var.nodes_desired + ClusterNodeSG = data.aws_security_group.clusternode.id + ClusterNodeInstanceProfile = data.aws_iam_instance_profile.clusternode.arn + 
ClusterSpotFleetRole = data.aws_iam_role.spotfleet.arn + ClusterAutoscalingRole = data.aws_iam_role.autoscaling.arn + } + template_body = file(format("%s/%s.yml", var.cloudformation_path, var.cluster_template)) +} diff --git a/aws-ecs/spotfleet/vars.tf b/aws-ecs/spotfleet/vars.tf new file mode 100644 index 00000000..15ec4e89 --- /dev/null +++ b/aws-ecs/spotfleet/vars.tf @@ -0,0 +1,77 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} + +variable "cluster_template" { + description = "The CloudFormation template used to create the ECS cluster SpotFleet" + default = "ecs_spotfleet" +} + +variable "environment" { + description = "The name of the environment associated with the cluster" +} + +variable "context" { + description = "Contextual naming of the ECS cluster (eg. user, service, etc.)" + default = "" +} + +variable "vpc_default" { + description = "Indicate whether to deploy in the default VPC" + default = true +} + +variable "vpc_tags" { + type = "map" + description = "A map of tags to match on the VPC lookup" + default = {} +} + +variable "appmesh_enabled" { + description = "Enable AppMesh for the cluster" + default = "False" + // validation { + // condition = var.appmesh_enabled == "true" || var.appmesh_enabled == "false" + // error_message = "Must be a boolean value" + // } +} + +variable "image_name" { + description = "AWS image for Sling instance" + default = "amzn2-ami-ecs-hvm-2.0.????????-x86_64-ebs" +} + +variable "image_owner" { + description = "AMI image owner (leave blank for current account)" + default = "591542846629" +} + +variable "instance_type" { + description = "AWS instance type for Sling" + default = "t3.micro" +} + +variable "nodes_max" { + description = "Maximum allowed nodes in the cluster" + type = number + default = 1 +} + +variable "nodes_min" { + description = "Minimum required nodes in the cluster" + type = number + default = 1 +} + +variable "nodes_desired" 
{ + description = "Suggested nodes in the cluster" + type = number + default = 1 +} + +locals { + env_string = var.context == "" ? var.environment : format("%s-%s", var.environment, var.context) + cluster_name = var.context == "" ? format("%s-cluster", var.environment) : format("%s-cluster-%s", var.environment, var.context) + account_hash = substr(sha256(data.aws_caller_identity.current.account_id), -10, 10) +} diff --git a/aws-ecs/task-definition/.dockerignore b/aws-ecs/task-definition/.dockerignore new file mode 100644 index 00000000..79655ca1 --- /dev/null +++ b/aws-ecs/task-definition/.dockerignore @@ -0,0 +1 @@ +backend.tf diff --git a/aws-ecs/task-definition/.gitignore b/aws-ecs/task-definition/.gitignore new file mode 100644 index 00000000..546edd5d --- /dev/null +++ b/aws-ecs/task-definition/.gitignore @@ -0,0 +1,2 @@ +*.json +backend.tf diff --git a/aws-ecs/task-definition/main.tf b/aws-ecs/task-definition/main.tf new file mode 100644 index 00000000..7a0380e1 --- /dev/null +++ b/aws-ecs/task-definition/main.tf @@ -0,0 +1,26 @@ +resource "aws_cloudwatch_log_group" "log" { + name = "ecs/${var.name}" + retention_in_days = 14 +} + +module "task_definition" { + source = "figurate/ecs-task-definition/aws" + + execution_role = null + image = var.image + image_tag = var.image_tag + cpu = var.cpu + memory = var.memory + log_group = aws_cloudwatch_log_group.log.name + ports = var.ports + network_mode = var.network_mode + task_role = "ecs-task-role" + namespace = var.namespace + name = var.name + type = "default" + health_check = var.health_check + volumes = var.volumes + mounts = var.mounts + task_environment = var.task_environment + docker_labels = var.docker_labels +} diff --git a/aws-ecs/task-definition/vars.tf b/aws-ecs/task-definition/vars.tf new file mode 100644 index 00000000..bd85bdc1 --- /dev/null +++ b/aws-ecs/task-definition/vars.tf @@ -0,0 +1,72 @@ +variable "name" { + description = "Task definition name" +} + +variable "image" { + description = "ECR 
registry name" +} + +variable "namespace" { + description = "Provides a context for the intended deployment of the Task Definition (e.g. environment, etc.)" + default = null +} + +variable "image_tag" { + description = "Docker image tag for ECS service" + default = "latest" +} + +variable "cpu" { + description = "Required vCPU units for the service" + type = number + default = 256 +} + +variable "memory" { + description = "Required memory for the service" + type = number + default = 256 +} + +variable "ports" { + description = "A list of port mappings to publish" + type = list(tuple([number, number])) + default = [] +} + +variable "network_mode" { + description = "Network mode for service containers (available options: `bridge`, `host`, `awsvpc`)" + default = "bridge" +} + +variable "volumes" { + description = "A list of volume names and host paths to mount on the container" + type = list(tuple([string, string])) + default = [] +} + +variable "mounts" { + description = "A list of volume ids and mount paths for the container" + type = list(tuple([string, string, bool])) + default = [] +} + +variable "task_environment" { + description = "A map of environment variables configured on the primary container" + type = map(string) + default = { + NGINX_ENVSUBST_TEMPLATE_DIR = "/opt/nginx/templates" + } +} + +variable "docker_labels" { + description = "A map of docker labels to attach to the container definition" + type = map(any) + default = {} +} + +variable "health_check" { + description = "The command, interval, timeout and number of retries for health check of the primary container" + type = tuple([list(string), number, number, number]) + default = null +} diff --git a/aws-efs/filesystem/README.md b/aws-efs/filesystem/README.md new file mode 100644 index 00000000..8a10303d --- /dev/null +++ b/aws-efs/filesystem/README.md @@ -0,0 +1,30 @@ +# EFS Filesystem Creation + +Create an EFS filesystem and mount points. 
+ +## Requirements + +| Name | Version | +|------|---------| +| aws | >= 2.7.0 | + +## Providers + +| Name | Version | +|------|---------| +| aws | >= 2.7.0 | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| assume\_role\_account | AWS account ID for the role to assume into | `any` | `null` | no | +| context | Contextual naming of the ECS cluster (eg. user, service, etc.) | `string` | `""` | no | +| environment | The name of the environment associated with the cluster | `any` | n/a | yes | +| vpc\_default | Indicate whether to deploy in the default VPC | `bool` | `true` | no | +| vpc\_tags | A map of tags to match on the VPC lookup | `map(any)` | `{}` | no | + +## Outputs + +No output. + diff --git a/aws-iam/README.md b/aws-iam/README.md new file mode 100644 index 00000000..2a7c099e --- /dev/null +++ b/aws-iam/README.md @@ -0,0 +1,13 @@ +# Provision users/groups + +Support for creation of users and groups in the following environments: + +* Amazon Web Services (AWS) + +### Example: + + $ accelerator/build.sh && docker run --rm -it -p8081:80 -d bedrock/accelerator + + $ TF_VAR_users=fred,betty,barney TF_VAR_groups=power-user,ecs-admin TF_VAR_region=ap-southeast-2 bash <(curl http://localhost:8081/user/config.sh) + + $ bash <(curl http://localhost:8081/user/aws.sh) \ No newline at end of file diff --git a/aws-iam/keyrotation/Dockerfile b/aws-iam/keyrotation/Dockerfile new file mode 100644 index 00000000..b1b70391 --- /dev/null +++ b/aws-iam/keyrotation/Dockerfile @@ -0,0 +1,6 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ +ADD lambda /bootstrap/lambda + +ENV TF_VAR_lambda_path="/bootstrap/lambda" diff --git a/aws-iam/keyrotation/README.md b/aws-iam/keyrotation/README.md new file mode 100644 index 00000000..01122af2 --- /dev/null +++ b/aws-iam/keyrotation/README.md @@ -0,0 +1,14 @@ +# IAM Access Key Rotation Management + +Support for automatic disabling/deletion of old API access keys. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| key\_max\_age | The maximum age (in days) of an IAM access key before it is disabled. | string | `90` | no | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| lambda\_timeout | Maximum time (seconds) to allow the lambda to execute | string | `15` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-iam/keyrotation/lambda/IamKeyRotation.py b/aws-iam/keyrotation/lambda/IamKeyRotation.py new file mode 100644 index 00000000..26be5690 --- /dev/null +++ b/aws-iam/keyrotation/lambda/IamKeyRotation.py @@ -0,0 +1,53 @@ +import os +from datetime import datetime, timezone + +import boto3 +from botocore.exceptions import ClientError + +key_max_age = int(os.environ['KeyMaxAge']) + + +def lambda_handler(event, context): + user_ids = get_user_ids(event) + + for userid in user_ids: + rotate_access_keys(userid, key_max_age) + + +def get_user_ids(event): + if 'UserId' in event: + return [event['UserId']] + + iam = boto3.client('iam') + + if 'Group' in event: + group = iam.get_group(GroupName=event['Group']) + return list(map(lambda i: i['UserName'], group['Users'])) + + try: + lambdac = boto3.client('lambda') + lambda_config = lambdac.get_function_configuration(FunctionName='IamKeyRotation') + if 'Group' in lambda_config['Environment']['Variables']: + group = iam.get_group(GroupName=lambda_config['Environment']['Variables']['Group']) + return list(map(lambda i: i['UserName'], group['Users'])) + else: + return [lambda_config['Environment']['Variables']['UserId']] + except ClientError as e: + print(e) + + +def rotate_access_keys(userid, max_age): + iam = boto3.client('iam') + + access_keys = iam.list_access_keys(UserName=userid) + + for key in access_keys['AccessKeyMetadata']: + if key['Status'] == 'Inactive': + # If key 
already inactive delete it.. + print(f"Deleting disabled key: {key['AccessKeyId']}") + iam.delete_access_key(UserName=userid, AccessKeyId=key['AccessKeyId']) + elif (datetime.now(timezone.utc) - key['CreateDate']).days > max_age: + # Disable active keys older than max_age.. + print(f"Disabling old key: {key['AccessKeyId']}") + iam.update_access_key(UserName=userid, AccessKeyId=key['AccessKeyId'], Status='Inactive') + # new_access_key = iam.create_access_key(UserName=userid) diff --git a/aws-iam/keyrotation/main.tf b/aws-iam/keyrotation/main.tf new file mode 100644 index 00000000..112c8c25 --- /dev/null +++ b/aws-iam/keyrotation/main.tf @@ -0,0 +1,38 @@ +/** + * # IAM Access Key Rotation Management + * + * Support for automatic disabling/deletion of old API access keys. + */ +data "archive_file" "iam_keyrotation" { + output_path = "iam_keyrotation.zip" + type = "zip" + source_dir = format("%s", var.lambda_path) +} + +data "aws_iam_role" "iam_keyrotation" { + name = "bedrock-iam-keyrotation-role" +} + +resource "aws_lambda_function" "iam_keyrotation" { + function_name = "IamKeyRotation" + handler = "IamKeyRotation.lambda_handler" + filename = data.archive_file.iam_keyrotation.output_path + role = data.aws_iam_role.iam_keyrotation.arn + runtime = "python3.6" + source_code_hash = data.archive_file.iam_keyrotation.output_base64sha256 + timeout = var.lambda_timeout + environment { + variables = { + KeyMaxAge = var.key_max_age + } + } +} + +resource "aws_cloudwatch_log_group" "iam_keyrotation" { + name = "/aws/lambda/${aws_lambda_function.iam_keyrotation.function_name}" + retention_in_days = 30 +} + +resource "aws_iam_group" "iam_keyrotation" { + name = "iam-keyrotation" +} diff --git a/aws-iam/keyrotation/vars.tf b/aws-iam/keyrotation/vars.tf new file mode 100644 index 00000000..a9f38e20 --- /dev/null +++ b/aws-iam/keyrotation/vars.tf @@ -0,0 +1,14 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} + +variable 
"key_max_age" { + description = "The maximum age (in days) of an IAM access key before it is disabled." + default = 90 +} + +variable "lambda_timeout" { + description = "Maximum time (seconds) to allow the lambda to execute" + default = 15 +} diff --git a/aws-iam/main.tf b/aws-iam/main.tf new file mode 100644 index 00000000..146d69b7 --- /dev/null +++ b/aws-iam/main.tf @@ -0,0 +1,10 @@ +module "iam_settings" { + source = "nozaq/secure-baseline/aws//modules/iam-baseline" + + aws_account_id = var.aws_account_id + support_iam_role_principal_arns = var.support_iam_role_principal_arns +} + +module "iam_analyzer" { + source = "nozaq/secure-baseline/aws//modules/analyzer-baseline" +} diff --git a/blueprints/aws/cloudwatch/trigger/Dockerfile b/aws-iam/poweruser/Dockerfile similarity index 100% rename from blueprints/aws/cloudwatch/trigger/Dockerfile rename to aws-iam/poweruser/Dockerfile diff --git a/aws-iam/poweruser/README.md b/aws-iam/poweruser/README.md new file mode 100644 index 00000000..4e8fd205 --- /dev/null +++ b/aws-iam/poweruser/README.md @@ -0,0 +1,28 @@ +# AWS IAM user configuration + +Purpose: Provision an IAM user in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create a user that has the following privileges: + +* IAM access for creation of IAM roles specific to a blueprint +* Access to read/write Terraform state associated with the account +* Access to assume roles required to provision a blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| iam\_groups | A list of IAM groups the user belongs to. 
| list | `` | no | +| region | AWS default region | string | - | yes | +| username | The username of the Bedrock power user | string | - | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| aws\_access\_key\_id | The AWS access key associated with the power user | +| aws\_secret\_access\_key | The AWS access key secret associated with the power user | +| poweruser\_name | The username of the provisioned power user | + diff --git a/aws-iam/poweruser/main.tf b/aws-iam/poweruser/main.tf new file mode 100644 index 00000000..6ecfc8c7 --- /dev/null +++ b/aws-iam/poweruser/main.tf @@ -0,0 +1,54 @@ +/** + * # AWS IAM user configuration + * + * Purpose: Provision an IAM user in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create a user that has the following privileges: + * + * * IAM access for creation of IAM roles specific to a blueprint + * * Access to read/write Terraform state associated with the account + * * Access to assume roles required to provision a blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["iam:ListRoles"] + resources = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/*"] + } + statement { + actions = ["sts:AssumeRole"] + resources = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/bedrock*"] + } +} + +resource "aws_iam_user" "poweruser" { + name = var.username + permissions_boundary = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-poweruser-boundary" +} + +resource "aws_iam_access_key" "poweruser" { + user = aws_iam_user.poweruser.name +} + +resource "aws_iam_user_policy_attachment" "power_user" { + policy_arn = "arn:aws:iam::aws:policy/PowerUserAccess" + user = aws_iam_user.poweruser.name +} + +resource "aws_iam_user_policy_attachment" "s3_terraform_access" { + policy_arn = 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-terraform-state" + user = aws_iam_user.poweruser.name +} + +resource "aws_iam_user_policy" "iam_assume_role" { + policy = data.aws_iam_policy_document.assume_role_policy.json + user = aws_iam_user.poweruser.name +} + +resource "aws_iam_user_group_membership" "iam_groups" { + user = aws_iam_user.poweruser.name + groups = var.iam_groups +} diff --git a/aws-iam/poweruser/outputs.tf b/aws-iam/poweruser/outputs.tf new file mode 100644 index 00000000..19763739 --- /dev/null +++ b/aws-iam/poweruser/outputs.tf @@ -0,0 +1,14 @@ +output "poweruser_name" { + description = "The username of the provisioned power user" + value = "${aws_iam_user.poweruser.name}" +} + +output "aws_access_key_id" { + description = "The AWS access key associated with the power user" + value = "${aws_iam_access_key.poweruser.id}" +} + +output "aws_secret_access_key" { + description = "The AWS access key secret associated with the power user" + value = "${aws_iam_access_key.poweruser.secret}" +} diff --git a/aws-iam/poweruser/vars.tf b/aws-iam/poweruser/vars.tf new file mode 100644 index 00000000..1876f766 --- /dev/null +++ b/aws-iam/poweruser/vars.tf @@ -0,0 +1,9 @@ +variable "username" { + description = "The username of the Bedrock power user" +} + +variable "iam_groups" { + description = "A list of IAM groups the user belongs to." 
+ type = "list" + default = ["iam-keyrotation"] +} diff --git a/blueprints/aws/s3/encrypted/provider.tf b/aws-iam/provider.tf similarity index 93% rename from blueprints/aws/s3/encrypted/provider.tf rename to aws-iam/provider.tf index 0a7427cf..98827b91 100644 --- a/blueprints/aws/s3/encrypted/provider.tf +++ b/aws-iam/provider.tf @@ -1,7 +1,7 @@ provider "aws" { version = ">= 2.7.0" assume_role { - role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-s3-admin" + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" } } diff --git a/aws-iam/vars.tf b/aws-iam/vars.tf new file mode 100644 index 00000000..2f510195 --- /dev/null +++ b/aws-iam/vars.tf @@ -0,0 +1,3 @@ +variable "aws_account_id" {} + +variable "support_iam_role_principal_arns" {} \ No newline at end of file diff --git a/blueprints/aws/codebuild/blueprint/Dockerfile b/aws-lambda/alias/Dockerfile similarity index 100% rename from blueprints/aws/codebuild/blueprint/Dockerfile rename to aws-lambda/alias/Dockerfile diff --git a/blueprints/aws/cognito/userpool/Dockerfile b/aws-lambda/layer/Dockerfile similarity index 100% rename from blueprints/aws/cognito/userpool/Dockerfile rename to aws-lambda/layer/Dockerfile diff --git a/aws-lambda/layer/README.md b/aws-lambda/layer/README.md new file mode 100644 index 00000000..79e6c689 --- /dev/null +++ b/aws-lambda/layer/README.md @@ -0,0 +1,11 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| content\_path | Path to content to include in the layer | string | - | yes | +| description | A short description of the layer contents | string | - | yes | +| layer\_name | Name of the lambda layer | string | - | yes | +| region | AWS default region | string | - | yes | +| runtimes | List of compatible runtimes for the lambda layer | list | - | yes | + diff --git a/aws-lambda/layer/vars.tf 
b/aws-lambda/layer/vars.tf new file mode 100644 index 00000000..19490f0f --- /dev/null +++ b/aws-lambda/layer/vars.tf @@ -0,0 +1,16 @@ +variable "layer_name" { + description = "Name of the lambda layer" +} + +variable "description" { + description = "A short description of the layer contents" +} + +variable "content_path" { + description = "Path to content to include in the layer" +} + +variable "runtimes" { + description = "List of compatible runtimes for the lambda layer" + type = "list" +} diff --git a/blueprints/dns/alias/aws/provider.tf b/aws-lambda/provider.tf similarity index 91% rename from blueprints/dns/alias/aws/provider.tf rename to aws-lambda/provider.tf index e643b28d..9f2c40fd 100644 --- a/blueprints/dns/alias/aws/provider.tf +++ b/aws-lambda/provider.tf @@ -1,7 +1,7 @@ provider "aws" { version = ">= 2.7.0" assume_role { - role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-route53-admin" + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-lambda-admin" } } diff --git a/aws-rds/cluster/Dockerfile b/aws-rds/cluster/Dockerfile new file mode 100644 index 00000000..6240fff6 --- /dev/null +++ b/aws-rds/cluster/Dockerfile @@ -0,0 +1,8 @@ +FROM bedrock/blueprint-base + +ADD export.sh /bootstrap/ + +ADD *.tf /bootstrap/ +ADD cloudformation /bootstrap/cloudformation + +ENV TF_VAR_cloudformation_path="/bootstrap/cloudformation" diff --git a/aws-rds/cluster/README.md b/aws-rds/cluster/README.md new file mode 100644 index 00000000..a88866ec --- /dev/null +++ b/aws-rds/cluster/README.md @@ -0,0 +1,8 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| environment | The name of the environment applied to the RDS stack | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/aws-rds/cluster/cloudformation/rds_aurora.yml 
b/aws-rds/cluster/cloudformation/rds_aurora.yml new file mode 100644 index 00000000..992c03c9 --- /dev/null +++ b/aws-rds/cluster/cloudformation/rds_aurora.yml @@ -0,0 +1,48 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an Aurora cluster. + +Parameters: + AvailabilityZones: + Type: List + +Resources: + 'Fn::Transform': + Name: 'AWS::Include' + Description: Import DBCluster ParameterGroup for the specified engine + Parameters: + Location: !Sub ${BucketUrl}/${Engine}_${EngineVersion}-params.yml + + ClusterSubnetGroup: + + ClusterSecurityGroup: + + DatabaseCluster: + + PrimaryInstance: + + PrimaryInstanceCPUAlarm: + + PrimaryInstancesMemoryAlarm: + + PrimaryInstanceEndpoint: + + ReplicaInstance: + + ReplicaInstanceCPUAlarm: + + ReplicaInstancesMemoryAlarm: + + ReplicaInstanceEndpoint: + + ClusterStartSchedule: + + ClusterStartSchedulePermission: + + ClusterStopSchedule: + + ClusterStopSchedulePermission: + + ClusterSnapshotSchedule: + + ClusterSnapshotSchedulePermission: diff --git a/aws-rds/cluster/export.sh b/aws-rds/cluster/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-rds/cluster/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-rds/cluster/main.tf b/aws-rds/cluster/main.tf new file mode 100644 index 00000000..478f820e --- /dev/null +++ b/aws-rds/cluster/main.tf @@ -0,0 +1,7 @@ +resource "aws_cloudformation_stack" "rds_aurora" { + name = "${var.environment}-rds-cluster" + parameters { + Environment = "${var.environment}" + } + template_body = "${file(format("%s/rds_aurora.yml", var.cloudformation_path))}" +} diff --git a/aws-rds/cluster/vars.tf b/aws-rds/cluster/vars.tf new file mode 100644 index 00000000..972cd7ea --- /dev/null +++ b/aws-rds/cluster/vars.tf @@ -0,0 
+1,4 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} diff --git a/aws-rds/cycle/Dockerfile b/aws-rds/cycle/Dockerfile new file mode 100644 index 00000000..adeb9481 --- /dev/null +++ b/aws-rds/cycle/Dockerfile @@ -0,0 +1,8 @@ +FROM bedrock/blueprint-base + +ADD export.sh /bootstrap/ + +ADD *.tf /bootstrap/ +ADD lambda /bootstrap/lambda + +ENV TF_VAR_lambda_path="/bootstrap/lambda" diff --git a/aws-rds/cycle/README.md b/aws-rds/cycle/README.md new file mode 100644 index 00000000..2f31b811 --- /dev/null +++ b/aws-rds/cycle/README.md @@ -0,0 +1,7 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-rds/cycle/export.sh b/aws-rds/cycle/export.sh new file mode 100755 index 00000000..67bd0600 --- /dev/null +++ b/aws-rds/cycle/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/lambda ./ diff --git a/aws-rds/cycle/lambda/RdsCycleCluster.py b/aws-rds/cycle/lambda/RdsCycleCluster.py new file mode 100644 index 00000000..68d0355c --- /dev/null +++ b/aws-rds/cycle/lambda/RdsCycleCluster.py @@ -0,0 +1,48 @@ +import boto3 +from botocore.exceptions import ClientError + + +def lambda_handler(event, context): + cluster_name = get_cluster_name(event) + result = None + if event['Action'] == 'StartInstance': + result = start_cluster(cluster_name) + elif event['Action'] == 'StopInstance': + result = stop_cluster(cluster_name) + print(result) + + +def get_cluster_name(event): + if event['ClusterName']: + return event['ClusterName'] + + lambda_ref = boto3.client('lambda') + try: + lambda_config = 
lambda_ref.get_function_configuration( + FunctionName='RdsCycleCluster' + ) + return lambda_config['Environment']['Variables']['ClusterName'] + except ClientError as e: + print(e) + + +def start_cluster(cluster_name): + rds = boto3.client('rds') + try: + result = rds.start_db_cluster( + DBClusterIdentifier=cluster_name + ) + return result + except ClientError as e: + print(e) + + +def stop_cluster(cluster_name): + rds = boto3.client('rds') + try: + result = rds.stop_db_cluster( + DBClusterIdentifier=cluster_name + ) + return result + except ClientError as e: + print(e) diff --git a/aws-rds/cycle/lambda/RdsCycleInstance.py b/aws-rds/cycle/lambda/RdsCycleInstance.py new file mode 100644 index 00000000..aac50080 --- /dev/null +++ b/aws-rds/cycle/lambda/RdsCycleInstance.py @@ -0,0 +1,48 @@ +import boto3 +from botocore.exceptions import ClientError + + +def lambda_handler(event, context): + instance_name = get_instance_name(event) + result = None + if event['Action'] == 'StartInstance': + result = start_instance(instance_name) + elif event['Action'] == 'StopInstance': + result = stop_instance(instance_name) + print(result) + + +def get_instance_name(event): + if event['InstanceName']: + return event['InstanceName'] + + lambda_ref = boto3.client('lambda') + try: + lambda_config = lambda_ref.get_function_configuration( + FunctionName='RdsCycleInstance' + ) + return lambda_config['Environment']['Variables']['InstanceName'] + except ClientError as e: + print(e) + + +def start_instance(instance_name): + rds = boto3.client('rds') + try: + result = rds.start_db_instance( + DBInstanceIdentifier=instance_name + ) + return result + except ClientError as e: + print(e) + + +def stop_instance(instance_name): + rds = boto3.client('rds') + try: + result = rds.stop_db_instance( + DBInstanceIdentifier=instance_name + ) + return result + except ClientError as e: + print(e) diff --git a/aws-rds/cycle/main.tf b/aws-rds/cycle/main.tf new file mode 100644 index 00000000..c50b2740 --- /dev/null 
+++ b/aws-rds/cycle/main.tf @@ -0,0 +1,37 @@ +data "archive_file" "rds_cycle" { + output_path = "rds_cycle.zip" + type = "zip" + source_dir = "${format("%s", var.lambda_path)}" +} + +data "aws_iam_role" "rds_cycle" { + name = "bedrock-rds-cycle-role" +} + +resource "aws_lambda_function" "rds_cycle_instance" { + function_name = "RdsCycleInstance" + handler = "RdsCycleInstance.lambda_handler" + filename = "${data.archive_file.rds_cycle.output_path}" + role = "${data.aws_iam_role.rds_cycle.arn}" + runtime = "python3.6" + source_code_hash = "${data.archive_file.rds_cycle.output_base64sha256}" +} + +resource "aws_cloudwatch_log_group" "rds_cycle_instance" { + name = "/aws/lambda/${aws_lambda_function.rds_cycle_instance.function_name}" + retention_in_days = 30 +} + +resource "aws_lambda_function" "rds_cycle_cluster" { + function_name = "RdsCycleCluster" + handler = "RdsCycleCluster.lambda_handler" + filename = "${data.archive_file.rds_cycle.output_path}" + role = "${data.aws_iam_role.rds_cycle.arn}" + runtime = "python3.7" + source_code_hash = "${data.archive_file.rds_cycle.output_base64sha256}" +} + +resource "aws_cloudwatch_log_group" "rds_cycle_cluster" { + name = "/aws/lambda/${aws_lambda_function.rds_cycle_cluster.function_name}" + retention_in_days = 30 +} diff --git a/aws-rds/cycle/vars.tf b/aws-rds/cycle/vars.tf new file mode 100644 index 00000000..4b0fb0d1 --- /dev/null +++ b/aws-rds/cycle/vars.tf @@ -0,0 +1,4 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} diff --git a/aws-rds/instance/Dockerfile b/aws-rds/instance/Dockerfile new file mode 100644 index 00000000..6240fff6 --- /dev/null +++ b/aws-rds/instance/Dockerfile @@ -0,0 +1,8 @@ +FROM bedrock/blueprint-base + +ADD export.sh /bootstrap/ + +ADD *.tf /bootstrap/ +ADD cloudformation /bootstrap/cloudformation + +ENV TF_VAR_cloudformation_path="/bootstrap/cloudformation" diff --git a/aws-rds/instance/README.md b/aws-rds/instance/README.md new file 
mode 100644 index 00000000..a88866ec --- /dev/null +++ b/aws-rds/instance/README.md @@ -0,0 +1,8 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cloudformation\_path | The root path to cloudformation templates | string | `cloudformation` | no | +| environment | The name of the environment applied to the RDS stack | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/aws-rds/instance/cloudformation/rds_instance.yml b/aws-rds/instance/cloudformation/rds_instance.yml new file mode 100644 index 00000000..b1b2ab37 --- /dev/null +++ b/aws-rds/instance/cloudformation/rds_instance.yml @@ -0,0 +1,38 @@ +AWSTemplateFormatVersion: 2010-09-09 + +Description: A Cloudformation template for provisioning an RDS instance. + +Parameters: + +Resources: + 'Fn::Transform': + Name: 'AWS::Include' + Description: Import DBInstance ParameterGroup for the specified engine + Parameters: + Location: !Sub ${BucketUrl}/${Engine}_${EngineVersion}-params.yml + + InstanceSubnetGroup: + + InstanceSecurityGroup: + + DatabaseInstance: + + InstanceCPUAlarm: + + InstancesMemoryAlarm: + + InstanceEndpoint: + + InstanceStartSchedule: + + InstanceStartSchedulePermission: + + InstanceStopSchedule: + + InstanceStopSchedulePermission: + + InstanceSnapshotSchedule: + + InstanceSnapshotSchedulePermission: + +Outputs: diff --git a/aws-rds/instance/export.sh b/aws-rds/instance/export.sh new file mode 100755 index 00000000..8d8c78df --- /dev/null +++ b/aws-rds/instance/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-rds/instance/main.tf b/aws-rds/instance/main.tf new file mode 100644 index 00000000..0981a86a --- /dev/null +++ b/aws-rds/instance/main.tf @@ -0,0 +1,7 @@ +resource 
"aws_cloudformation_stack" "rds_instance" { + name = "${var.environment}-rds-instance" + parameters { + Environment = "${var.environment}" + } + template_body = "${file(format("%s/rds_instance.yml", var.cloudformation_path))}" +} diff --git a/aws-rds/instance/vars.tf b/aws-rds/instance/vars.tf new file mode 100644 index 00000000..972cd7ea --- /dev/null +++ b/aws-rds/instance/vars.tf @@ -0,0 +1,4 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} diff --git a/blueprints/blueprint/usergroup/provider.tf b/aws-rds/provider.tf similarity index 100% rename from blueprints/blueprint/usergroup/provider.tf rename to aws-rds/provider.tf diff --git a/aws-rds/snapshot/Dockerfile b/aws-rds/snapshot/Dockerfile new file mode 100644 index 00000000..adeb9481 --- /dev/null +++ b/aws-rds/snapshot/Dockerfile @@ -0,0 +1,8 @@ +FROM bedrock/blueprint-base + +ADD export.sh /bootstrap/ + +ADD *.tf /bootstrap/ +ADD lambda /bootstrap/lambda + +ENV TF_VAR_lambda_path="/bootstrap/lambda" diff --git a/aws-rds/snapshot/README.md b/aws-rds/snapshot/README.md new file mode 100644 index 00000000..2f31b811 --- /dev/null +++ b/aws-rds/snapshot/README.md @@ -0,0 +1,7 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-rds/snapshot/export.sh b/aws-rds/snapshot/export.sh new file mode 100755 index 00000000..67bd0600 --- /dev/null +++ b/aws-rds/snapshot/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/lambda ./ diff --git a/aws-rds/snapshot/lambda/RdsSnapshotCluster.py b/aws-rds/snapshot/lambda/RdsSnapshotCluster.py new file mode 100644 
index 00000000..db298655 --- /dev/null +++ b/aws-rds/snapshot/lambda/RdsSnapshotCluster.py @@ -0,0 +1,44 @@ +from datetime import datetime + +import boto3 +from botocore.exceptions import ClientError + + +def lambda_handler(event, context): + cluster_name = get_cluster_name(event) + result = snapshot_cluster(cluster_name) + print(result) + + +def get_cluster_name(event): + if event['ClusterName']: + return event['ClusterName'] + + lambda_ref = boto3.client('lambda') + try: + lambda_config = lambda_ref.get_function_configuration( + FunctionName='RdsSnapshotCluster' + ) + return lambda_config['Environment']['Variables']['ClusterName'] + except ClientError as e: + print(e) + + +def snapshot_cluster(cluster_name): + rds = boto3.client('rds') + timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M') + snapshot_identifier = '%s-%s' % (cluster_name, timestamp) + try: + result = rds.create_db_cluster_snapshot( + DBClusterIdentifier=cluster_name, + DBClusterSnapshotIdentifier=snapshot_identifier, + Tags=[ + { + 'Key': 'CreatedOn', + 'Value': timestamp + } + ] + ) + return result + except ClientError as e: + print(e) diff --git a/aws-rds/snapshot/lambda/RdsSnapshotInstance.py b/aws-rds/snapshot/lambda/RdsSnapshotInstance.py new file mode 100644 index 00000000..73abe00a --- /dev/null +++ b/aws-rds/snapshot/lambda/RdsSnapshotInstance.py @@ -0,0 +1,44 @@ +from datetime import datetime + +import boto3 +from botocore.exceptions import ClientError + + +def lambda_handler(event, context): + instance_name = get_instance_name(event) + result = snapshot_instance(instance_name) + print(result) + + +def get_instance_name(event): + if event['InstanceName']: + return event['InstanceName'] + + lambda_ref = boto3.client('lambda') + try: + lambda_config = lambda_ref.get_function_configuration( + FunctionName='RdsSnapshotInstance' + ) + return lambda_config['Environment']['Variables']['InstanceName'] + except ClientError as e: + print(e) + + +def snapshot_instance(instance_name): + rds = boto3.client('rds') + 
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M') + snapshot_identifier = '%s-%s' % (instance_name, timestamp) + try: + result = rds.create_db_snapshot( + DBInstanceIdentifier=instance_name, + DBSnapshotIdentifier=snapshot_identifier, + Tags=[ + { + 'Key': 'CreatedOn', + 'Value': timestamp + } + ] + ) + return result + except ClientError as e: + print(e) diff --git a/aws-rds/snapshot/main.tf b/aws-rds/snapshot/main.tf new file mode 100644 index 00000000..653f7233 --- /dev/null +++ b/aws-rds/snapshot/main.tf @@ -0,0 +1,37 @@ +data "archive_file" "rds_snapshot" { + output_path = "rds_snapshot.zip" + type = "zip" + source_dir = "${format("%s", var.lambda_path)}" +} + +data "aws_iam_role" "rds_snapshot" { + name = "bedrock-rds-snapshot-role" +} + +resource "aws_lambda_function" "rds_snapshot_instance" { + function_name = "RdsSnapshotInstance" + handler = "RdsSnapshotInstance.lambda_handler" + filename = "${data.archive_file.rds_snapshot.output_path}" + role = "${data.aws_iam_role.rds_snapshot.arn}" + runtime = "python3.6" + source_code_hash = "${data.archive_file.rds_snapshot.output_base64sha256}" +} + +resource "aws_cloudwatch_log_group" "rds_snapshot_instance" { + name = "/aws/lambda/${aws_lambda_function.rds_snapshot_instance.function_name}" + retention_in_days = 30 +} + +resource "aws_lambda_function" "rds_snapshot_cluster" { + function_name = "RdsSnapshotCluster" + handler = "RdsSnapshotCluster.lambda_handler" + filename = "${data.archive_file.rds_snapshot.output_path}" + role = "${data.aws_iam_role.rds_snapshot.arn}" + runtime = "python3.7" + source_code_hash = "${data.archive_file.rds_snapshot.output_base64sha256}" +} + +resource "aws_cloudwatch_log_group" "rds_snapshot_cluster" { + name = "/aws/lambda/${aws_lambda_function.rds_snapshot_cluster.function_name}" + retention_in_days = 30 +} diff --git a/aws-rds/snapshot/vars.tf b/aws-rds/snapshot/vars.tf new file mode 100644 index 00000000..4b0fb0d1 --- /dev/null +++ b/aws-rds/snapshot/vars.tf @@ -0,0 +1,4 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} diff --git 
a/aws-route53/alias/README.md b/aws-route53/alias/README.md new file mode 100644 index 00000000..8fc25157 --- /dev/null +++ b/aws-route53/alias/README.md @@ -0,0 +1,10 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| fqdn | Fully-qualified domain name for the record | string | - | yes | +| record\_type | Indicates the type of DNS record (A, CNAME, etc.) | string | `A` | no | +| region | AWS default region | string | - | yes | +| target | An alias target for the DNS record | string | - | yes | + diff --git a/aws-route53/alias/vars.tf b/aws-route53/alias/vars.tf new file mode 100644 index 00000000..b4b82692 --- /dev/null +++ b/aws-route53/alias/vars.tf @@ -0,0 +1,16 @@ +variable "fqdn" { + description = "Fully-qualified domain name for the record" +} + +variable "record_type" { + description = "Indicates the type of DNS record (A, CNAME, etc.)" + default = "A" +} + +variable "target" { + description = "An alias target for the DNS record" +} + +locals { + hosted_zone = join(".", slice(split(".", var.fqdn), 1, length(split(".", var.fqdn)))) +} \ No newline at end of file diff --git a/blueprints/aws/config/rule/Dockerfile b/aws-route53/privatezone/Dockerfile similarity index 100% rename from blueprints/aws/config/rule/Dockerfile rename to aws-route53/privatezone/Dockerfile diff --git a/aws-route53/privatezone/README.md b/aws-route53/privatezone/README.md new file mode 100644 index 00000000..6daa92de --- /dev/null +++ b/aws-route53/privatezone/README.md @@ -0,0 +1,9 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| fqdn | A fully qualified domain name (FQDN) that is the basis for the hosted zone | string | - | yes | +| region | AWS default region | string | 
- | yes | +| vpc\_id | Identifier of VPC to associated private zone with (leave blank to indicate default VPC) | string | `` | no | + diff --git a/aws-route53/privatezone/main.tf b/aws-route53/privatezone/main.tf new file mode 100644 index 00000000..74fa3022 --- /dev/null +++ b/aws-route53/privatezone/main.tf @@ -0,0 +1,12 @@ +data "aws_vpc" "tenant" { + default = "${var.vpc_default}" + tags = "${var.vpc_tags}" +} + +resource "aws_route53_zone" "primary" { + name = "${var.fqdn}" + + vpc { + vpc_id = "${data.aws_vpc.tenant.id}" + } +} diff --git a/aws-route53/privatezone/vars.tf b/aws-route53/privatezone/vars.tf new file mode 100644 index 00000000..0d10d14f --- /dev/null +++ b/aws-route53/privatezone/vars.tf @@ -0,0 +1,19 @@ +variable "vpc_default" { + description = "Boolean value to indicate whether the matched VPC should be default for the region" + default = "true" +} + +variable "vpc_tags" { + type = "map" + description = "A map of tags to match on the VPC lookup" + default = {} +} + +variable "fqdn" { + description = "A fully qualified domain name (FQDN) that is the basis for the hosted zone" +} + +variable "vpc_id" { + description = "Identifier of VPC to associated private zone with (leave blank to indicate default VPC)" + default = "" +} diff --git a/aws-route53/provider.tf b/aws-route53/provider.tf new file mode 100644 index 00000000..9823fedc --- /dev/null +++ b/aws-route53/provider.tf @@ -0,0 +1,15 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-route53-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/aws/dynamodb/table/Dockerfile b/aws-route53/publiczone/Dockerfile similarity index 100% rename from blueprints/aws/dynamodb/table/Dockerfile rename to aws-route53/publiczone/Dockerfile diff --git 
a/aws-route53/publiczone/README.md b/aws-route53/publiczone/README.md new file mode 100644 index 00000000..fbaf2102 --- /dev/null +++ b/aws-route53/publiczone/README.md @@ -0,0 +1,10 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| apex\_redirect\_fqdn | The FQDN to redirect requests for the apex domain of the hosted zone | string | `` | no | +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| fqdn | A fully qualified domain name (FQDN) that is the basis for the hosted zone | string | - | yes | +| region | AWS default region | string | - | yes | +| source\_cidrs | Restrict S3 website access to the specified CIDR blocks of IP addresses | list | `` | no | + diff --git a/aws-route53/publiczone/main.tf b/aws-route53/publiczone/main.tf new file mode 100644 index 00000000..994433b6 --- /dev/null +++ b/aws-route53/publiczone/main.tf @@ -0,0 +1,43 @@ +resource "aws_route53_zone" "primary" { + name = var.fqdn +} + +data "aws_iam_policy_document" "bucket_policy" { + statement { + sid = "PublicReadGetObject" + effect = "Allow" + principals { + identifiers = ["*"] + type = "AWS" + } + actions = ["s3:GetObject"] + resources = ["arn:aws:s3:::${var.fqdn}/*"] + condition { + test = "IpAddress" + variable = "aws:SourceIp" + values = var.source_cidrs + } + } +} + +resource "aws_s3_bucket" "apex_redirect" { + count = length(var.apex_redirect_fqdn) > 0 ? 1 : 0 + bucket = var.fqdn + acl = "public-read" + policy = data.aws_iam_policy_document.bucket_policy.json + website { + redirect_all_requests_to = var.apex_redirect_fqdn + } +} + +resource "aws_route53_record" "www" { + count = length(var.apex_redirect_fqdn) > 0 ? 
1 : 0 + zone_id = aws_route53_zone.primary.zone_id + name = var.fqdn + type = "A" + alias { + evaluate_target_health = false + name = aws_s3_bucket.apex_redirect[0].website_domain + zone_id = aws_s3_bucket.apex_redirect[0].hosted_zone_id + } +} diff --git a/aws-route53/publiczone/vars.tf b/aws-route53/publiczone/vars.tf new file mode 100644 index 00000000..9adf33bc --- /dev/null +++ b/aws-route53/publiczone/vars.tf @@ -0,0 +1,14 @@ +variable "fqdn" { + description = "A fully qualified domain name (FQDN) that is the basis for the hosted zone" +} + +variable "apex_redirect_fqdn" { + description = "The FQDN to redirect requests for the apex domain of the hosted zone" + default = "" +} + +variable "source_cidrs" { + description = "Restrict S3 website access to the specified CIDR blocks of IP addresses" + type = "list" + default = ["0.0.0.0/0"] +} diff --git a/aws-route53/record/README.md b/aws-route53/record/README.md new file mode 100644 index 00000000..c8ac2dde --- /dev/null +++ b/aws-route53/record/README.md @@ -0,0 +1,11 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| fqdn | Fully-qualified domain name for the record | string | - | yes | +| record\_ttl | The time to live (TTL) in seconds | string | `900` | no | +| record\_type | Indicates the type of DNS record (A, CNAME, etc.) 
| string | `A` | no | +| region | AWS default region | string | - | yes | +| targets | A list of target values for the DNS record | list | - | yes | + diff --git a/aws-route53/redirect/README.md b/aws-route53/redirect/README.md new file mode 100644 index 00000000..fbaf2102 --- /dev/null +++ b/aws-route53/redirect/README.md @@ -0,0 +1,10 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| apex\_redirect\_fqdn | The FQDN to redirect requests for the apex domain of the hosted zone | string | `` | no | +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| fqdn | A fully qualified domain name (FQDN) that is the basis for the hosted zone | string | - | yes | +| region | AWS default region | string | - | yes | +| source\_cidrs | Restrict S3 website access to the specified CIDR blocks of IP addresses | list | `` | no | + diff --git a/aws-s3/README.md b/aws-s3/README.md new file mode 100644 index 00000000..f9b53c5d --- /dev/null +++ b/aws-s3/README.md @@ -0,0 +1,4 @@ +# Simple Storage Service (S3) Blueprints + +This directory contains blueprints associated with maintaining +AWS S3 buckets for object storage. diff --git a/aws-s3/encrypt/README.md b/aws-s3/encrypt/README.md new file mode 100644 index 00000000..d216d187 --- /dev/null +++ b/aws-s3/encrypt/README.md @@ -0,0 +1,14 @@ +# AWS Lambda function configuration + +Deploy a lambda function. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| bucket\_name | Name of the target S3 bucket | string | - | yes | +| function\_name | A unique name used to reference the function | string | - | yes | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-s3/encrypt/lambda/build.gradle b/aws-s3/encrypt/lambda/build.gradle new file mode 100644 index 00000000..fa654e02 --- /dev/null +++ b/aws-s3/encrypt/lambda/build.gradle @@ -0,0 +1,19 @@ +plugins { + id 'groovy' +} + +repositories { + mavenCentral() +} + +dependencies { + compile "org.codehaus.groovy:groovy-all:$groovyVersion" + compile "com.amazonaws:aws-java-sdk-lambda:$awsSdkVersion" +} + +task buildZip(type: Zip) { + from compileGroovy + from processResources +} + +build.dependsOn buildZip diff --git a/aws-s3/encrypt/lambda/gradle.properties b/aws-s3/encrypt/lambda/gradle.properties new file mode 100644 index 00000000..538ad0b4 --- /dev/null +++ b/aws-s3/encrypt/lambda/gradle.properties @@ -0,0 +1,2 @@ +groovyVersion = 2.5.4 +awsSdkVersion = 1.11.545 diff --git a/aws-s3/encrypt/lambda/src/main/groovy/S3EncryptFile.groovy b/aws-s3/encrypt/lambda/src/main/groovy/S3EncryptFile.groovy new file mode 100644 index 00000000..b7bea59d --- /dev/null +++ b/aws-s3/encrypt/lambda/src/main/groovy/S3EncryptFile.groovy @@ -0,0 +1,14 @@ +class HandlerInput { + String file +} + +class HandlerOutput { + boolean success +} + +class S3EncryptFile { + + def handler(HandlerInput input) { + + } +} diff --git a/aws-s3/encrypt/main.tf b/aws-s3/encrypt/main.tf new file mode 100644 index 00000000..76cc7e6e --- /dev/null +++ b/aws-s3/encrypt/main.tf @@ -0,0 +1,39 @@ +/** + * # AWS Lambda function configuration + * + * Deploy a lambda function. 
+ */ +data "aws_caller_identity" "current" {} + +data "archive_file" "s3_encrypt" { + output_path = "s3_encrypt.zip" + type = "zip" + source_dir = "${format("%s", var.lambda_path)}" +} + +data "aws_iam_role" "s3_encrypt" { + name = "bedrock-s3-encrypt-role" +} + +data "aws_s3_bucket" "target" { + bucket = "${data.aws_caller_identity.current.account_id}-${var.bucket_name}" +} + +resource "aws_lambda_function" "s3_encrypt" { + function_name = "${var.function_name}" + handler = "S3EncryptFile.lambda_handler" + filename = "${data.archive_file.s3_encrypt.output_path}" + role = "${data.aws_iam_role.s3_encrypt.arn}" + runtime = "python3.6" + source_code_hash = "${data.archive_file.s3_encrypt.output_base64sha256}" + environment { + variables { + S3Bucket = "${data.aws_s3_bucket.target.bucket}" + } + } +} + +resource "aws_cloudwatch_log_group" "s3_encrypt" { + name = "lambda/${aws_lambda_function.s3_encrypt.function_name}" + retention_in_days = 30 +} diff --git a/aws-s3/encrypt/vars.tf b/aws-s3/encrypt/vars.tf new file mode 100644 index 00000000..c9a3dea6 --- /dev/null +++ b/aws-s3/encrypt/vars.tf @@ -0,0 +1,12 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} + +variable "function_name" { + description = "A unique name used to reference the function" +} + +variable "bucket_name" { + description = "Name of the target S3 bucket" +} diff --git a/aws-s3/encrypted/main.tf b/aws-s3/encrypted/main.tf new file mode 100644 index 00000000..e5d7aaf4 --- /dev/null +++ b/aws-s3/encrypted/main.tf @@ -0,0 +1,56 @@ +/** + * # AWS S3 bucket configuration + * + * Provision an S3 bucket in AWS. 
+ */ +data "aws_caller_identity" "current" {} + +data "aws_s3_bucket" "access_log" { + bucket = "${data.aws_caller_identity.current.account_id}-${var.access_log_bucket}" +} + +data "archive_file" "package" { + output_path = "package.zip" + type = "zip" + source_dir = var.content_path + excludes = var.excludes +} + +resource "aws_kms_key" "encrypted_bucket" { + description = "Encryption key for ${var.bucket_name} S3 bucket" +} + +resource "aws_kms_alias" "encrypted_bucket" { + target_key_id = aws_kms_key.encrypted_bucket.id + name = "alias/${var.bucket_name}" +} + +module "encrypted_bucket" { + source = "micronode/s3-bucket/aws//modules/encrypted" + + bucket = "${data.aws_caller_identity.current.account_id}-${var.bucket_name}" + expiration_days = var.object_expires + versioned = var.version_enabled + encryption_key = aws_kms_key.encrypted_bucket.id + logging_bucket = data.aws_s3_bucket.access_log.id +} + +resource "null_resource" "package_sync" { + triggers = { + content_path = sha256(var.content_path) + includes = sha256(join(",", var.includes)) + excludes = sha256(join(",", var.excludes)) + delete_flag = sha256(var.delete) + package_hash = data.archive_file.package.output_sha + } + provisioner "local-exec" { + command = < 0 ? local.excludes_string : ""} \ + ${length(var.includes) > 0 ? local.includes_string : ""} \ + ${var.delete == "true" ? 
"--delete" : ""} \ + ${var.content_path} s3://${module.encrypted_bucket.bucket_id} +EOF + } + depends_on = [module.encrypted_bucket] +} diff --git a/aws-s3/encrypted/vars.tf b/aws-s3/encrypted/vars.tf new file mode 100644 index 00000000..47649af6 --- /dev/null +++ b/aws-s3/encrypted/vars.tf @@ -0,0 +1,49 @@ +variable "bucket_name" { + description = "Name of S3 bucket" +} + +variable "version_enabled" { + description = "Enable object versioning" + default = "true" +} + +variable "object_expires" { + description = "Number of days before object expiration" + default = "0" +} + +variable "access_log_bucket" { + description = "An S3 bucket used as a target for access logs" +} + +variable "restrict_public_access" { + description = "Indicates whether to block public access to this bucket" + default = "true" +} + +variable "content_path" { + description = "Root path of local content" + default = "." +} + +variable "includes" { + description = "A list of include filters to apply" + type = list(string) + default = [] +} + +variable "excludes" { + description = "A list of exclude filters to apply" + type = list(string) + default = ["*"] +} + +variable "delete" { + description = "Remove files from the destination that don't exist in the source" + default = "false" +} + +locals { + excludes_string = "--exclude \"${join("\" --exclude \"", var.excludes)}\"" + includes_string = "--include \"${join("\" --include \"", var.includes)}\"" +} diff --git a/aws-s3/file/upload/README.md b/aws-s3/file/upload/README.md new file mode 100644 index 00000000..d216d187 --- /dev/null +++ b/aws-s3/file/upload/README.md @@ -0,0 +1,14 @@ +# AWS Lambda function configuration + +Deploy a lambda function. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| bucket\_name | Name of the target S3 bucket | string | - | yes | +| function\_name | A unique name used to reference the function | string | - | yes | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-s3/file/upload/lambda/S3UploadFile.py b/aws-s3/file/upload/lambda/S3UploadFile.py new file mode 100644 index 00000000..9ce0dc6b --- /dev/null +++ b/aws-s3/file/upload/lambda/S3UploadFile.py @@ -0,0 +1,31 @@ +import boto3 +import json +import os + +s3_bucket = os.environ['S3Bucket'] + + +def lambda_handler(event): + filename = event['filename'] + filetype = event['filetype'] + presigned_url = generate_presigned_post(filename, filetype) + + return json.dumps({ + 'data': presigned_url, + 'url': 'https://%s.s3.amazonaws.com/%s' % (s3_bucket, filename) + }) + + +def generate_presigned_post(filename, filetype): + s3 = boto3.client('s3') + + return s3.generate_presigned_post( + Bucket=s3_bucket, + Key=filename, + Fields={"acl": "public-read", "Content-Type": filetype}, + Conditions=[ + {"acl": "public-read"}, + {"Content-Type": filetype} + ], + ExpiresIn=3600 + ) diff --git a/aws-s3/file/upload/main.tf b/aws-s3/file/upload/main.tf new file mode 100644 index 00000000..0760c7b3 --- /dev/null +++ b/aws-s3/file/upload/main.tf @@ -0,0 +1,39 @@ +/** + * # AWS Lambda function configuration + * + * Deploy a lambda function. 
+ */ +data "aws_caller_identity" "current" {} + +data "archive_file" "s3_upload" { + output_path = "s3_upload.zip" + type = "zip" + source_dir = format("%s", var.lambda_path) +} + +data "aws_iam_role" "s3_upload" { + name = "bedrock-s3-upload-role" +} + +data "aws_s3_bucket" "target" { + bucket = "${data.aws_caller_identity.current.account_id}-${var.bucket_name}" +} + +resource "aws_lambda_function" "s3_upload" { + function_name = var.function_name + handler = "S3UploadFile.lambda_handler" + filename = data.archive_file.s3_upload.output_path + role = data.aws_iam_role.s3_upload.arn + runtime = "python3.6" + source_code_hash = data.archive_file.s3_upload.output_base64sha256 + environment { + variables { + S3Bucket = data.aws_s3_bucket.target.bucket + } + } +} + +resource "aws_cloudwatch_log_group" "s3_upload" { + name = "lambda/${aws_lambda_function.s3_upload.function_name}" + retention_in_days = 30 +} diff --git a/blueprints/aws/s3/encrypt/provider.tf b/aws-s3/file/upload/provider.tf similarity index 100% rename from blueprints/aws/s3/encrypt/provider.tf rename to aws-s3/file/upload/provider.tf diff --git a/aws-s3/file/upload/vars.tf b/aws-s3/file/upload/vars.tf new file mode 100644 index 00000000..c9a3dea6 --- /dev/null +++ b/aws-s3/file/upload/vars.tf @@ -0,0 +1,12 @@ +variable "lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} + +variable "function_name" { + description = "A unique name used to reference the function" +} + +variable "bucket_name" { + description = "Name of the target S3 bucket" +} diff --git a/aws-s3/provider.tf b/aws-s3/provider.tf new file mode 100644 index 00000000..b9a1ae9a --- /dev/null +++ b/aws-s3/provider.tf @@ -0,0 +1,18 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-s3-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} + +provider "null" { + version 
= ">= 2.1.0" +} + +provider "archive" { + version = ">= 1.2.0" +} \ No newline at end of file diff --git a/blueprints/cdn/endpoint/aws/Dockerfile b/aws-s3/trigger/lambda/Dockerfile similarity index 100% rename from blueprints/cdn/endpoint/aws/Dockerfile rename to aws-s3/trigger/lambda/Dockerfile diff --git a/aws-s3/trigger/lambda/README.md b/aws-s3/trigger/lambda/README.md new file mode 100644 index 00000000..49f2a500 --- /dev/null +++ b/aws-s3/trigger/lambda/README.md @@ -0,0 +1,11 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| bucket\_name | Name of the source S3 bucket | string | - | yes | +| filter\_suffix | A suffix filter that identifies bucket objects that can trigger the function | string | - | yes | +| function\_name | Name of the Lambda function triggered by bucket changes | string | - | yes | +| region | AWS default region | string | - | yes | +| trigger\_events | A list of bucket events that trigger the function | list | `` | no | + diff --git a/aws-s3/trigger/lambda/main.tf b/aws-s3/trigger/lambda/main.tf new file mode 100644 index 00000000..597cd4e6 --- /dev/null +++ b/aws-s3/trigger/lambda/main.tf @@ -0,0 +1,26 @@ +data "aws_caller_identity" "current" {} + +data "aws_s3_bucket" "source" { + bucket = "${data.aws_caller_identity.current.account_id}-${var.bucket_name}" +} + +data "aws_lambda_function" "trigger" { + function_name = "${var.function_name}" +} + +resource "aws_s3_bucket_notification" "trigger" { + bucket = "${data.aws_s3_bucket.source.id}" + lambda_function { + lambda_function_arn = "${data.aws_lambda_function.trigger.arn}" + events = "${var.trigger_events}" + filter_suffix = "${var.filter_suffix}" + } +} + +resource "aws_lambda_permission" "allow_bucket" { + statement_id = "AllowExecutionFromS3Bucket" + action = "lambda:InvokeFunction" + function_name = 
"${data.aws_lambda_function.trigger.function_name}" + principal = "s3.amazonaws.com" + source_arn = "${data.aws_s3_bucket.source.arn}" +} diff --git a/aws-s3/trigger/lambda/vars.tf b/aws-s3/trigger/lambda/vars.tf new file mode 100644 index 00000000..d887b54b --- /dev/null +++ b/aws-s3/trigger/lambda/vars.tf @@ -0,0 +1,17 @@ +variable "bucket_name" { + description = "Name of the source S3 bucket" +} + +variable "function_name" { + description = "Name of the Lambda function triggered by bucket changes" +} + +variable "trigger_events" { + description = "A list of bucket events that trigger the function" + type = "list" + default = ["s3:ObjectCreated:*"] +} + +variable "filter_suffix" { + description = "A suffix filter that identifies bucket objects that can trigger the function" +} diff --git a/blueprints/cdn/endpoint/do/Dockerfile b/aws-s3/trigger/sns/Dockerfile similarity index 100% rename from blueprints/cdn/endpoint/do/Dockerfile rename to aws-s3/trigger/sns/Dockerfile diff --git a/aws-s3/trigger/sns/README.md b/aws-s3/trigger/sns/README.md new file mode 100644 index 00000000..10af6d52 --- /dev/null +++ b/aws-s3/trigger/sns/README.md @@ -0,0 +1,11 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| bucket\_name | Name of the source S3 bucket | string | - | yes | +| filter\_suffix | A suffix filter that identifies bucket objects that can trigger the function | string | - | yes | +| region | AWS default region | string | - | yes | +| topic\_name | Name of the SNS topic to publish bucket changes | string | - | yes | +| trigger\_events | A list of bucket events that trigger the function | list | `` | no | + diff --git a/aws-s3/trigger/sns/main.tf b/aws-s3/trigger/sns/main.tf new file mode 100644 index 00000000..23bda552 --- /dev/null +++ b/aws-s3/trigger/sns/main.tf @@ -0,0 +1,18 @@ +data "aws_caller_identity" 
"current" {} + +data "aws_s3_bucket" "source" { + bucket = "${data.aws_caller_identity.current.account_id}-${var.bucket_name}" +} + +data "aws_sns_topic" "trigger" { + name = "${var.topic_name}" +} + +resource "aws_s3_bucket_notification" "trigger" { + bucket = "${data.aws_s3_bucket.source.id}" + topic { + topic_arn = "${data.aws_sns_topic.trigger.arn}" + events = "${var.trigger_events}" + filter_suffix = "${var.filter_suffix}" + } +} diff --git a/aws-s3/trigger/sns/vars.tf b/aws-s3/trigger/sns/vars.tf new file mode 100644 index 00000000..8a07c51b --- /dev/null +++ b/aws-s3/trigger/sns/vars.tf @@ -0,0 +1,17 @@ +variable "bucket_name" { + description = "Name of the source S3 bucket" +} + +variable "topic_name" { + description = "Name of the SNS topic to publish bucket changes" +} + +variable "trigger_events" { + description = "A list of bucket events that trigger the function" + type = "list" + default = ["s3:ObjectCreated:*"] +} + +variable "filter_suffix" { + description = "A suffix filter that identifies bucket objects that can trigger the function" +} diff --git a/blueprints/dns/alias/aws/Dockerfile b/aws-s3/website/Dockerfile similarity index 100% rename from blueprints/dns/alias/aws/Dockerfile rename to aws-s3/website/Dockerfile diff --git a/aws-s3/website/README.md b/aws-s3/website/README.md new file mode 100644 index 00000000..e2b1c4ac --- /dev/null +++ b/aws-s3/website/README.md @@ -0,0 +1,24 @@ +# AWS S3 website configuration + +Provision a static website using an S3 bucket in AWS. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| access\_log\_bucket | An S3 bucket used as a target for access logs | string | `` | no | +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| content\_path | Root path of local website content | string | `.` | no | +| create\_route53\_record | Boolean value to indicate whether route53 record is created | string | `true` | no | +| delete | Remove files from the destination that don't exist in the source | string | `false` | no | +| error\_page | HTML error page | string | `error.html` | no | +| excludes | A list of exclude filters to apply | list | `` | no | +| fqdn | Website domain | string | - | yes | +| includes | A list of include filters to apply | list | `` | no | +| index\_page | HTML index page | string | `index.html` | no | +| object\_expiration | Configure expiry of old verions (days) | string | `90` | no | +| region | AWS default region | string | - | yes | +| routing\_rules | - | string | `` | no | +| source\_cidrs | Restrict site access to the specified CIDR blocks of IP addresses | list | `` | no | +| version\_enabled | Enable object versioning | string | `true` | no | + diff --git a/aws-s3/website/main.tf b/aws-s3/website/main.tf new file mode 100644 index 00000000..4cea13a7 --- /dev/null +++ b/aws-s3/website/main.tf @@ -0,0 +1,95 @@ +/** + * # AWS S3 website configuration + * + * Provision a static website using an S3 bucket in AWS. 
+ */ +data "aws_caller_identity" "current" {} + +data "aws_s3_bucket" "access_log" { + bucket = replace(var.access_log_bucket, "/\\A\\z/", format("%s-access-logs", data.aws_caller_identity.current.account_id)) +} + +data "aws_iam_policy_document" "bucket_policy" { + statement { + sid = "PublicReadGetObject" + effect = "Allow" + principals { + identifiers = ["*"] + type = "AWS" + } + actions = ["s3:GetObject"] + resources = ["arn:aws:s3:::${var.fqdn}/*"] + condition { + test = "IpAddress" + variable = "aws:SourceIp" + values = var.source_cidrs + } + } +} + +data "archive_file" "content" { + output_path = "content.zip" + type = "zip" + source_dir = var.content_path +} + +data "aws_route53_zone" "primary" { + name = "${local.hosted_zone}." +} + +resource "aws_s3_bucket" "website" { + bucket = var.fqdn + acl = "public-read" + policy = data.aws_iam_policy_document.bucket_policy.json + website { + index_document = var.index_page + error_document = var.error_page + routing_rules = var.routing_rules + } + logging { + target_bucket = data.aws_s3_bucket.access_log.id + target_prefix = "${var.fqdn}/" + } + versioning { + enabled = var.version_enabled + } + lifecycle_rule { + id = "expire_old_versions" + enabled = var.version_enabled + noncurrent_version_expiration { + days = var.object_expiration + } + } +} + +resource "null_resource" "content_sync" { + triggers = { + content_path = sha256(var.content_path) + includes = sha256(join(",", var.includes)) + excludes = sha256(join(",", var.excludes)) + delete_flag = sha256(var.delete) + content_hash = data.archive_file.content.output_sha + } + provisioner "local-exec" { + command = < 0 ? local.excludes_string : ""} \ + ${length(var.includes) > 0 ? local.includes_string : ""} \ + ${var.delete == "true" ? 
"--delete" : ""} \ + ${var.content_path} s3://${aws_s3_bucket.website.id} +EOF + } + depends_on = ["aws_s3_bucket.website"] +} + +resource "aws_route53_record" "www" { + count = replace(replace(var.create_route53_record, "/true/", 1), "/false/", 0) + zone_id = data.aws_route53_zone.primary.zone_id + name = var.fqdn + type = "A" + alias { + evaluate_target_health = false + name = aws_s3_bucket.website.website_domain + zone_id = aws_s3_bucket.website.hosted_zone_id + } +} diff --git a/aws-s3/website/vars.tf b/aws-s3/website/vars.tf new file mode 100644 index 00000000..bc82cada --- /dev/null +++ b/aws-s3/website/vars.tf @@ -0,0 +1,71 @@ +variable "fqdn" { + description = "Website domain" +} + +variable "version_enabled" { + description = "Enable object versioning" + default = "true" +} + +variable "object_expiration" { + description = "Configure expiry of old verions (days)" + default = "90" +} + +variable "source_cidrs" { + description = "Restrict site access to the specified CIDR blocks of IP addresses" + type = "list" + default = ["0.0.0.0/0"] +} + +variable "access_log_bucket" { + description = "An S3 bucket used as a target for access logs" + default = "" +} + +variable "index_page" { + description = "HTML index page" + default = "index.html" +} + +variable "error_page" { + description = "HTML error page" + default = "error.html" +} + +variable "routing_rules" { + default = "" +} + +variable "content_path" { + description = "Root path of local website content" + default = "." 
+} + +variable "includes" { + description = "A list of include filters to apply" + type = "list" + default = [] +} + +variable "excludes" { + description = "A list of exclude filters to apply" + type = "list" + default = ["*"] +} + +variable "delete" { + description = "Remove files from the destination that don't exist in the source" + default = "false" +} + +variable "create_route53_record" { + description = "Boolean value to indicate whether route53 record is created" + default = "true" +} + +locals { + excludes_string = "--exclude \"${join("\" --exclude \"", var.excludes)}\"" + includes_string = "--include \"${join("\" --include \"", var.includes)}\"" + hosted_zone = "${join(".", slice(split(".", var.fqdn), 1, length(split(".", var.fqdn))))}" +} diff --git a/aws-secretsmanager/README.md b/aws-secretsmanager/README.md new file mode 100644 index 00000000..c4c10d2b --- /dev/null +++ b/aws-secretsmanager/README.md @@ -0,0 +1,10 @@ +# AWS Application Environment + +Logical grouping of resource for an application environment in an AWS account. + +Features: + +- Secrets management +- App config +- ECS Cluster +- ... diff --git a/blueprints/tfe/organization/backend.tf b/aws-secretsmanager/backend.tf similarity index 63% rename from blueprints/tfe/organization/backend.tf rename to aws-secretsmanager/backend.tf index 3352858f..1466e53b 100644 --- a/blueprints/tfe/organization/backend.tf +++ b/aws-secretsmanager/backend.tf @@ -1,7 +1,7 @@ terraform { backend "remote" { workspaces { - prefix = "tfe-organization-" + prefix = "aws-env-" } } } diff --git a/aws-secretsmanager/main.tf b/aws-secretsmanager/main.tf new file mode 100644 index 00000000..9a5c18dc --- /dev/null +++ b/aws-secretsmanager/main.tf @@ -0,0 +1,7 @@ +/** + * Provision resources for an AWS application environment. + * + * - KMS CMK used for encrypting application secrets. 
+ * - Application configuration + * - ECS cluster where required + */ diff --git a/aws-secretsmanager/vars.tf b/aws-secretsmanager/vars.tf new file mode 100644 index 00000000..752c80a6 --- /dev/null +++ b/aws-secretsmanager/vars.tf @@ -0,0 +1,17 @@ +variable "secrets_admins" { + description = "A list of users that can administer encrypted secrets" + type = list(string) + default = [] +} + +variable "secrets_users" { + description = "A list of users that can access encrypted secrets" + type = list(string) + default = [] +} + +variable "capabilities" { + description = "A list of capabilities enabled for the application environment" + type = list(string) + default = [] +} diff --git a/aws-ses/import/README.md b/aws-ses/import/README.md new file mode 100644 index 00000000..ce4ef4e0 --- /dev/null +++ b/aws-ses/import/README.md @@ -0,0 +1,11 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| bucket\_name | The name of the S3 bucket to import into | string | - | yes | +| function\_name | A unique name used to reference the function | string | - | yes | +| import\_timeout | The maximum time (seconds) to allow the import job to execute | string | `30` | no | +| lambda\_path | The root path to lambda function source | string | `lambda` | no | +| region | AWS default region | string | - | yes | + diff --git a/aws-ses/import/lambda/build.gradle b/aws-ses/import/lambda/build.gradle new file mode 100644 index 00000000..fa654e02 --- /dev/null +++ b/aws-ses/import/lambda/build.gradle @@ -0,0 +1,19 @@ +plugins { + id 'groovy' +} + +repositories { + mavenCentral() +} + +dependencies { + compile "org.codehaus.groovy:groovy-all:$groovyVersion" + compile "com.amazonaws:aws-java-sdk-lambda:$awsSdkVersion" +} + +task buildZip(type: Zip) { + from compileGroovy + from processResources +} + +build.dependsOn buildZip diff --git 
a/aws-ses/import/lambda/gradle.properties b/aws-ses/import/lambda/gradle.properties new file mode 100644 index 00000000..538ad0b4 --- /dev/null +++ b/aws-ses/import/lambda/gradle.properties @@ -0,0 +1,2 @@ +groovyVersion = 2.5.4 +awsSdkVersion = 1.11.545 diff --git a/aws-ses/import/lambda/src/main/groovy/MailboxImport.groovy b/aws-ses/import/lambda/src/main/groovy/MailboxImport.groovy new file mode 100644 index 00000000..e69de29b diff --git a/aws-ses/import/main.tf b/aws-ses/import/main.tf new file mode 100644 index 00000000..0fe3ae7a --- /dev/null +++ b/aws-ses/import/main.tf @@ -0,0 +1,30 @@ +data "aws_caller_identity" "current" {} + +data "archive_file" "import" { + output_path = "mailbox_import.zip" + type = "zip" + source_dir = "${var.lambda_path}" +} + +data "aws_iam_role" "import" { + name = "bedrock-mailbox-import-role" +} + +resource "aws_lambda_function" "csv_import" { + filename = "${data.archive_file.import.output_path}" + function_name = "${var.function_name}" + handler = "MailboxImportCsv.lambda_handler" + role = "${data.aws_iam_role.import.arn}" + runtime = "python3.6" + source_code_hash = "${data.archive_file.import.output_base64sha256}" + timeout = "${var.import_timeout}" + environment { + variables { + } + } +} + +resource "aws_cloudwatch_log_group" "csv_import" { + name = "lambda/${aws_lambda_function.csv_import.function_name}" + retention_in_days = 30 +} diff --git a/aws-ses/import/provider.tf b/aws-ses/import/provider.tf new file mode 100644 index 00000000..9aeacef9 --- /dev/null +++ b/aws-ses/import/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-ses-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/aws-ses/import/vars.tf b/aws-ses/import/vars.tf new file mode 100644 index 00000000..766fdc08 --- /dev/null +++ b/aws-ses/import/vars.tf @@ -0,0 +1,17 @@ +variable 
"lambda_path" { + description = "The root path to lambda function source" + default = "lambda" +} + +variable "function_name" { + description = "A unique name used to reference the function" +} + +variable "bucket_name" { + description = "The name of the S3 bucket to import into" +} + +variable "import_timeout" { + description = "The maximum time (seconds) to allow the import job to execute" + default = "30" +} diff --git a/aws-ses/mailbox/README.md b/aws-ses/mailbox/README.md new file mode 100644 index 00000000..7127b141 --- /dev/null +++ b/aws-ses/mailbox/README.md @@ -0,0 +1,9 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mailbox\_bucket | Name of S3 bucket where mailbox is stored | string | - | yes | +| mailbox\_id | A mailbox identifier | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/aws-ses/mailbox/main.tf b/aws-ses/mailbox/main.tf new file mode 100644 index 00000000..9b96bdc2 --- /dev/null +++ b/aws-ses/mailbox/main.tf @@ -0,0 +1,18 @@ +data "aws_caller_identity" "current" {} + +data "aws_s3_bucket" "mailboxes" { + bucket = "${data.aws_caller_identity.current.account_id}-${var.mailbox_bucket}" +} + +resource "aws_ses_receipt_rule" "mailbox" { + name = "${var.mailbox_id}" + rule_set_name = "default-rule-set" + recipients = ["${var.mailbox_id}"] + enabled = true + scan_enabled = true + s3_action { + position = 1 + bucket_name = "${data.aws_s3_bucket.mailboxes.bucket}" + object_key_prefix = "${var.mailbox_id}/inbox" + } +} diff --git a/aws-ses/mailbox/provider.tf b/aws-ses/mailbox/provider.tf new file mode 100644 index 00000000..9aeacef9 --- /dev/null +++ b/aws-ses/mailbox/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-ses-admin" + } +} + +variable 
"assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/aws-ses/mailbox/vars.tf b/aws-ses/mailbox/vars.tf new file mode 100644 index 00000000..f9584a4a --- /dev/null +++ b/aws-ses/mailbox/vars.tf @@ -0,0 +1,7 @@ +variable "mailbox_bucket" { + description = "Name of S3 bucket where mailbox is stored" +} + +variable "mailbox_id" { + description = "A mailbox identifier" +} \ No newline at end of file diff --git a/aws-sns/topic/main.tf b/aws-sns/topic/main.tf new file mode 100644 index 00000000..c1594128 --- /dev/null +++ b/aws-sns/topic/main.tf @@ -0,0 +1,3 @@ +resource "aws_sns_topic" "topic" { + name = var.topic_name +} diff --git a/aws-ssm/resourcegroup/README.md b/aws-ssm/resourcegroup/README.md new file mode 100644 index 00000000..d2cb28b7 --- /dev/null +++ b/aws-ssm/resourcegroup/README.md @@ -0,0 +1,6 @@ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| environment | Environment identifer for the group | string | - | yes | + diff --git a/aws-ssm/resourcegroup/main.tf b/aws-ssm/resourcegroup/main.tf new file mode 100644 index 00000000..4667bf65 --- /dev/null +++ b/aws-ssm/resourcegroup/main.tf @@ -0,0 +1,18 @@ +resource "aws_resourcegroups_group" "group" { + name = "${var.environment}-resources" + resource_query { + query = < /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/cloudformation ./ diff --git a/aws-vpc/cloudformation/main.tf b/aws-vpc/cloudformation/main.tf new file mode 100644 index 00000000..0d4e2683 --- /dev/null +++ b/aws-vpc/cloudformation/main.tf @@ -0,0 +1,18 @@ +/** + * # AWS VPC configuration + * + * Provision a VPC for an environment. 
+ */ +data "aws_iam_role" "vpc_cloudformation" { + name = "bedrock-vpc-cloudformation" +} + +resource "aws_cloudformation_stack" "vpc" { + name = "${var.environment}-vpc" + iam_role_arn = "${data.aws_iam_role.vpc_cloudformation.arn}" + parameters { + CidrBlock = "${var.cidr_block}" + Environment = "${var.environment}" + } + template_body = "${file(format("%s/vpc.yml", var.cloudformation_path))}" +} diff --git a/aws-vpc/cloudformation/outputs.tf b/aws-vpc/cloudformation/outputs.tf new file mode 100644 index 00000000..83b15192 --- /dev/null +++ b/aws-vpc/cloudformation/outputs.tf @@ -0,0 +1,3 @@ +output "vpc_id" { + value = "${lookup(aws_cloudformation_stack.vpc.outputs, "VpcId")}" +} diff --git a/aws-vpc/cloudformation/vars.tf b/aws-vpc/cloudformation/vars.tf new file mode 100644 index 00000000..ed9d2f12 --- /dev/null +++ b/aws-vpc/cloudformation/vars.tf @@ -0,0 +1,12 @@ +variable "cloudformation_path" { + description = "The root path to cloudformation templates" + default = "cloudformation" +} + +variable "environment" { + description = "The name of the environment represented by the VPC" +} + +variable "cidr_block" { + description = "The CIDR block covered by the VPC. For example: 10.0.0.0/16" +} diff --git a/aws-vpc/main.tf b/aws-vpc/main.tf new file mode 100644 index 00000000..28abc356 --- /dev/null +++ b/aws-vpc/main.tf @@ -0,0 +1,16 @@ +/** + * Provision resources for an AWS application. 
+ * + * - A VPC within the AWS tenancy to isolate application services + */ +module "vpc" { + source = "" +} + +module "subnets" { + source = "" +} + +module "security_groups" { + source = "" +} diff --git a/aws-vpc/natgateway/main.tf b/aws-vpc/natgateway/main.tf new file mode 100644 index 00000000..7558ec81 --- /dev/null +++ b/aws-vpc/natgateway/main.tf @@ -0,0 +1,36 @@ +data "aws_vpc" "tenant" { + default = "${var.vpc_default}" + tags = "${var.vpc_tags}" +} + +data "aws_subnet_ids" "public" { + vpc_id = "${data.aws_vpc.tenant.id}" +} + +data "aws_subnet_ids" "private" { + vpc_id = "${data.aws_vpc.tenant.id}" +} + +resource "aws_eip" "natgw" {} + +resource "aws_nat_gateway" "natgw" { + allocation_id = "${aws_eip.natgw.id}" + subnet_id = "${data.aws_subnet_ids.public.ids[0]}" +} + +resource "aws_route_table" "private" { + vpc_id = "${data.aws_vpc.tenant.id}" + route { + cidr_block = "0.0.0.0/0" + gateway_id = "${aws_nat_gateway.natgw.id}" + } + tags { + Name = "private_routes" + } +} + +resource "aws_route_table_association" "private" { + count = "${length(data.aws_subnet_ids.private.ids)}" + route_table_id = "${aws_route_table.private.id}" + subnet_id = "${data.aws_subnet_ids.private.ids[count.index]}" +} diff --git a/aws-vpc/natgateway/vars.tf b/aws-vpc/natgateway/vars.tf new file mode 100644 index 00000000..9a38d43f --- /dev/null +++ b/aws-vpc/natgateway/vars.tf @@ -0,0 +1,10 @@ +variable "vpc_default" { + description = "Boolean value to indicate whether the matched VPC should be default for the region" + default = "true" +} + +variable "vpc_tags" { + type = "map" + description = "A map of tags to match on the VPC lookup" + default = {} +} diff --git a/aws-vpc/provider.tf b/aws-vpc/provider.tf new file mode 100644 index 00000000..d17183c1 --- /dev/null +++ b/aws-vpc/provider.tf @@ -0,0 +1,15 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-vpc-admin" + } +} + 
+variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/aws-vpc/proxy/main.tf b/aws-vpc/proxy/main.tf new file mode 100644 index 00000000..b56112b6 --- /dev/null +++ b/aws-vpc/proxy/main.tf @@ -0,0 +1,27 @@ +data "aws_vpc" "tenant" { + default = "${var.vpc_default}" + tags = "${var.vpc_tags}" +} + +data "aws_subnet_ids" "private" { + vpc_id = "${data.aws_vpc.tenant.id}" +} + +resource "aws_eip" "natgw" {} + +resource "aws_route_table" "private" { + vpc_id = "${data.aws_vpc.tenant.id}" + route { + cidr_block = "0.0.0.0/0" + network_interface_id = "${var.proxy_id}" + } + tags { + Name = "private_routes" + } +} + +resource "aws_route_table_association" "private" { + count = "${length(data.aws_subnet_ids.private.ids)}" + route_table_id = "${aws_route_table.private.id}" + subnet_id = "${data.aws_subnet_ids.private.ids[count.index]}" +} diff --git a/aws-vpc/proxy/vars.tf b/aws-vpc/proxy/vars.tf new file mode 100644 index 00000000..05d0361d --- /dev/null +++ b/aws-vpc/proxy/vars.tf @@ -0,0 +1,14 @@ +variable "vpc_default" { + description = "Boolean value to indicate whether the matched VPC should be default for the region" + default = "true" +} + +variable "vpc_tags" { + type = map(any) + description = "A map of tags to match on the VPC lookup" + default = {} +} + +variable "proxy_id" { + description = "Network interface of proxy used for Internet access" +} diff --git a/aws-vpc/subnets/main.tf b/aws-vpc/subnets/main.tf new file mode 100644 index 00000000..95ada31a --- /dev/null +++ b/aws-vpc/subnets/main.tf @@ -0,0 +1,30 @@ +data "aws_availability_zones" "available" {} + +data "aws_vpc" "tenant" { + default = var.vpc_default + tags = var.vpc_tags +} + +data "aws_route_table" "subnet_routes" { + filter { + name = "tag:Name" + values = [var.route_table] + } +} + +resource "aws_subnet" "subnets" { + count = 
length(data.aws_availability_zones.available.zone_ids) + availability_zone_id = data.aws_availability_zones.available.zone_ids[count.index] + map_public_ip_on_launch = var.is_public + vpc_id = data.aws_vpc.tenant.id + cidr_block = cidrsubnet(data.aws_vpc.tenant.cidr_block, var.newbits, count.index) + tags { + Name = format("private_subnet_%s", substr(data.aws_availability_zones.available.names[count.index], -2, -1)) + } +} + +resource "aws_route_table_association" "subnet_routes_association" { + count = length(data.aws_availability_zones.available.zone_ids) + subnet_id = aws_subnet.subnets.*.id[count.index] + route_table_id = data.aws_route_table.subnet_routes.id +} diff --git a/aws-vpc/subnets/vars.tf b/aws-vpc/subnets/vars.tf new file mode 100644 index 00000000..5f163642 --- /dev/null +++ b/aws-vpc/subnets/vars.tf @@ -0,0 +1,34 @@ +variable "vpc_default" { + description = "Boolean value to indicate whether the matched VPC should be default for the region" + default = "true" +} + +variable "vpc_tags" { + type = "map" + description = "A map of tags to match on the VPC lookup" + default = {} +} + +variable "cidr_prefix" { + description = "The CIDR block" +} + +variable "is_public" { + description = "Indicates whether the subnets should be publically routable (via Internet)" + default = false +} + +variable "newbits" { + description = <` | no | -| includes | A list of include filters to apply | list | `` | no | -| object\_expires | Number of days before object expiration | string | `0` | no | -| region | AWS default region | string | - | yes | -| version\_enabled | Enable object versioning | string | `true` | no | - diff --git a/blueprints/bastion/do/Dockerfile b/blueprints/bastion/do/Dockerfile deleted file mode 100644 index b45f9642..00000000 --- a/blueprints/bastion/do/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM bedrock/blueprint-base - -ADD export.sh /bootstrap/ - -ADD *.tf /bootstrap/ -ADD templates /bootstrap/templates - -ENV 
TF_VAR_template_path="/bootstrap/templates" diff --git a/blueprints/bastion/openstack/Dockerfile b/blueprints/bastion/openstack/Dockerfile deleted file mode 100644 index b45f9642..00000000 --- a/blueprints/bastion/openstack/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM bedrock/blueprint-base - -ADD export.sh /bootstrap/ - -ADD *.tf /bootstrap/ -ADD templates /bootstrap/templates - -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/blueprints/digitalocean/networking/.terraform.lock.hcl b/blueprints/digitalocean/networking/.terraform.lock.hcl new file mode 100644 index 00000000..0b254ad0 --- /dev/null +++ b/blueprints/digitalocean/networking/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/digitalocean/digitalocean" { + version = "2.6.0" + constraints = "2.6.0" + hashes = [ + "h1:/voJ6wIK/G8S/E7zLPvgweKMYGTC0ZcP45ei2gf/IFk=", + "h1:P1C7e6RlhLpi6KuE/sMruDdM5zZisJwMuKGbnxg8tAw=", + "zh:088c2a4eb9579947d50d8bcd722e75f2f1839acae302c8d43133b1da9926dae3", + "zh:323ba833d011371ca6d953752b133c0acad6462176cd2f804077a5f9d892cd2e", + "zh:3fbc64f1fabe57b6df49511c0d8753f1bbf776d5824ba060a51961d2a4265097", + "zh:4c90a933e23288ee2db2228e4e30055882d91bed831c2191cbecd849b27e44cb", + "zh:62f1cf4c82e5fcaf1a17e39cb96638f006b303758813a6c5ecb08bc93cd93364", + "zh:68ad1354e9f925477dc41e658e84a4996ba662920bbc61a2680235b94811169b", + "zh:9119b573c59429c2dfacb7d95b39c4e021783b8281ecd68f1621ad4a17c112cd", + "zh:9c15e3660f2399c25ee3ad53bd54927a6529d1393a54f1e1c2a523e0369dea46", + "zh:bc88f68bf6a6b5e803734f06731e31d61a5977ed1a638bfe102a54094c4d4030", + "zh:c2b013a5d7e60b31211b0f8c0dd898840b8f1aa7225318da05def33b5edb9388", + "zh:e46e21f6ffa7aac11ade8ab4b87a28ac405ef40a35793cef1f1fd6db6d8e5a0a", + "zh:e879643369e03abc192fbcf7ab06611bb8f36d37ceb5641ba05d58869f10ab7c", + "zh:ee9b56400e545ce1805842b795179a004313b8a947bd8f3490f5c5a0cb7703e5", + 
"zh:fb44861ae0b58b594aa4e565e0ed06bce939753b14a20b4abd3e8276e839e7a7", + ] +} diff --git a/blueprints/digitalocean/networking/Makefile b/blueprints/digitalocean/networking/Makefile new file mode 100644 index 00000000..6c8bc7b8 --- /dev/null +++ b/blueprints/digitalocean/networking/Makefile @@ -0,0 +1,64 @@ +SHELL:=/bin/bash +TF_WORKSPACE?=micronode + +ifneq (, $(shell which docker)) +TERRAFORM_VERSION=0.14.7 +TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}/.ssh:/root/.ssh" -v "${HOME}/.aws:/root/.aws" -v "${HOME}/.terraformrc:/root/.terraformrc" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work hashicorp/terraform:$(TERRAFORM_VERSION) +else +TERRAFORM=terraform +endif + +TERRAFORM_DOCS=docker run --rm -v "${PWD}:/work" tmknom/terraform-docs + +CHECKOV=docker run --rm -v "${PWD}:/work" bridgecrew/checkov + +TFSEC=docker run --rm -v "${PWD}:/work" liamg/tfsec + +DIAGRAMS=docker run -v "${PWD}:/work" figurate/diagrams python + +.PHONY: all clean init validate test docs format list new select plan apply + +WORKSPACE=$(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + +all: test docs format + +clean: + rm -rf .terraform/ + +init: + $(TERRAFORM) init -upgrade + +validate: init + $(TERRAFORM) validate + +test: validate + $(CHECKOV) -d /work + + $(TFSEC) /work + +diagram: + $(DIAGRAMS) diagram.py + +docs: diagram + $(TERRAFORM_DOCS) markdown ./ >./README.md + +format: + $(TERRAFORM) fmt -list=true ./ + +list: + $(TERRAFORM) workspace list + +new: + $(TERRAFORM) workspace new $(WORKSPACE) + +select: + $(TERRAFORM) workspace select $(WORKSPACE) + +workspace: + $(TERRAFORM) workspace show + +plan: + $(TERRAFORM) plan -input=false -var-file=$(TF_WORKSPACE).tfvars + +apply: + $(TERRAFORM) apply -input=false -var-file=$(TF_WORKSPACE).tfvars $(TF_ARGS) \ No newline at end of file diff --git a/blueprints/digitalocean/networking/backend.tf b/blueprints/digitalocean/networking/backend.tf new file mode 100644 index 
00000000..3198f1b2 --- /dev/null +++ b/blueprints/digitalocean/networking/backend.tf @@ -0,0 +1,8 @@ +terraform { + backend "remote" { + organization="micronode" + workspaces { + prefix = "do-networking-" + } + } +} diff --git a/blueprints/digitalocean/networking/firewall.tf b/blueprints/digitalocean/networking/firewall.tf new file mode 100644 index 00000000..e051a85b --- /dev/null +++ b/blueprints/digitalocean/networking/firewall.tf @@ -0,0 +1,8 @@ +module "firewall_egress" { + source = "figurate/firewall/digitalocean" + count = length(var.firewalls) + + name = var.firewalls[count.index][0] + ingress_rules = var.firewalls[count.index][1] + egress_rules = var.firewalls[count.index][2] +} diff --git a/blueprints/storage/bucket/do/provider.tf b/blueprints/digitalocean/networking/provider.tf similarity index 100% rename from blueprints/storage/bucket/do/provider.tf rename to blueprints/digitalocean/networking/provider.tf diff --git a/blueprints/digitalocean/networking/vars.tf b/blueprints/digitalocean/networking/vars.tf new file mode 100644 index 00000000..5d386eae --- /dev/null +++ b/blueprints/digitalocean/networking/vars.tf @@ -0,0 +1,5 @@ +variable "firewalls" { + description = "A list of firewall rule configs" + type = list(tuple([string, list(string), list(string)])) + default = [] +} diff --git a/blueprints/digitalocean/networking/versions.tf b/blueprints/digitalocean/networking/versions.tf new file mode 100644 index 00000000..4514993b --- /dev/null +++ b/blueprints/digitalocean/networking/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "2.6.0" + } + } +} diff --git a/blueprints/docker/engine/do/Dockerfile b/blueprints/docker/engine/do/Dockerfile deleted file mode 100644 index b45f9642..00000000 --- a/blueprints/docker/engine/do/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM bedrock/blueprint-base - -ADD export.sh /bootstrap/ - -ADD *.tf /bootstrap/ -ADD templates 
/bootstrap/templates - -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/blueprints/ecs/README.md b/blueprints/ecs/README.md new file mode 100644 index 00000000..20720935 --- /dev/null +++ b/blueprints/ecs/README.md @@ -0,0 +1,4 @@ +# Elastic Container Service (ECS) Blueprints + +This directory contains blueprints associated with maintaining +ECS services. diff --git a/blueprints/ecs/task-definition/.dockerignore b/blueprints/ecs/task-definition/.dockerignore new file mode 100644 index 00000000..79655ca1 --- /dev/null +++ b/blueprints/ecs/task-definition/.dockerignore @@ -0,0 +1 @@ +backend.tf diff --git a/blueprints/ecs/task-definition/.gitignore b/blueprints/ecs/task-definition/.gitignore new file mode 100644 index 00000000..546edd5d --- /dev/null +++ b/blueprints/ecs/task-definition/.gitignore @@ -0,0 +1,2 @@ +*.json +backend.tf diff --git a/blueprints/aws/ecs-task-definition/Makefile b/blueprints/ecs/task-definition/Makefile similarity index 55% rename from blueprints/aws/ecs-task-definition/Makefile rename to blueprints/ecs/task-definition/Makefile index 1893e7a8..ef6c19f6 100644 --- a/blueprints/aws/ecs-task-definition/Makefile +++ b/blueprints/ecs/task-definition/Makefile @@ -14,9 +14,8 @@ DIAGRAMS=docker run -v "${PWD}:/work" figurate/diagrams python NAME=$(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS)) -DOCKER_REGISTRY?=bedrock -DOCKER_IMAGE?=aws-`basename $(PWD)` -DOCKER_TAGS?=latest +REGISTRY?=bedrock +TAGS?=latest BUILD_ARGS?= .PHONY: all build tag push @@ -24,13 +23,13 @@ BUILD_ARGS?= all: build clean: - docker rmi $(DOCKER_REGISTRY)/$(DOCKER_IMAGE) + docker rmi $(REGISTRY)/ecs-task-definition build: - docker build -t $(DOCKER_REGISTRY)/$(DOCKER_IMAGE) ${BUILD_ARGS} --build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) --build-arg HTTP_PROXY=${http_proxy} --network=host . 
+ docker build -t $(REGISTRY)/ecs-task-definition ${BUILD_ARGS} --build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) --build-arg HTTP_PROXY=${http_proxy} --network=host . tag: build - echo $(DOCKER_TAGS) | tr "/," "-\n" | xargs -n1 -I % docker tag $(DOCKER_REGISTRY)/$(DOCKER_IMAGE) $(DOCKER_REGISTRY)/$(DOCKER_IMAGE):% + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker tag $(REGISTRY)/ecs-task-definition $(REGISTRY)/ecs-task-definition:% push: tag - echo $(DOCKER_TAGS) | tr "/," "-\n" | xargs -n1 -I % docker push $(DOCKER_REGISTRY)/$(DOCKER_IMAGE):% + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker push $(REGISTRY)/ecs-task-definition:% diff --git a/blueprints/aws/ecs-task-definition/main.tf b/blueprints/ecs/task-definition/main.tf similarity index 91% rename from blueprints/aws/ecs-task-definition/main.tf rename to blueprints/ecs/task-definition/main.tf index ee92cf55..b6cc46ee 100644 --- a/blueprints/aws/ecs-task-definition/main.tf +++ b/blueprints/ecs/task-definition/main.tf @@ -1,5 +1,5 @@ resource "aws_cloudwatch_log_group" "log" { - name = "ecs/nginx" + name = "ecs/${var.name}" retention_in_days = 14 } @@ -7,7 +7,7 @@ module "task_definition" { source = "figurate/ecs-task-definition/aws" execution_role = null - image = "nginx" + image = var.image image_tag = var.image_tag memory = 64 log_group = aws_cloudwatch_log_group.log.name diff --git a/blueprints/aws/ecs-task-definition/vars.tf b/blueprints/ecs/task-definition/vars.tf similarity index 89% rename from blueprints/aws/ecs-task-definition/vars.tf rename to blueprints/ecs/task-definition/vars.tf index bba507d7..cb1d6897 100644 --- a/blueprints/aws/ecs-task-definition/vars.tf +++ b/blueprints/ecs/task-definition/vars.tf @@ -1,11 +1,14 @@ variable "name" { - description = "A name to identify the ECS service" - default = "nginx" + description = "ECS service name" +} + +variable "image" { + description = "ECR registry name" } variable "namespace" { description = "Provides a context for the intended 
deployment of the Task Definition (e.g. environment, etc.)" - default = "example" + default = null } variable "image_tag" { @@ -16,9 +19,7 @@ variable "image_tag" { variable "ports" { description = "A list of port mappings to publish" type = list(tuple([number, number])) - default = [ - [80, 80], [9090, 9090] - ] + default = [] } variable "network_mode" { diff --git a/blueprints/hashicorp/vault/do/Dockerfile b/blueprints/hashicorp/vault/do/Dockerfile deleted file mode 100644 index b45f9642..00000000 --- a/blueprints/hashicorp/vault/do/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM bedrock/blueprint-base - -ADD export.sh /bootstrap/ - -ADD *.tf /bootstrap/ -ADD templates /bootstrap/templates - -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/blueprints/kubernetes/README.md b/blueprints/kubernetes/README.md new file mode 100644 index 00000000..5bdf26e3 --- /dev/null +++ b/blueprints/kubernetes/README.md @@ -0,0 +1,4 @@ +# Kubernetes Blueprints + +This directory contains blueprints associated with maintaining +Kubernetes services. 
diff --git a/blueprints/nginx/vhost/provider.tf b/blueprints/nginx/provider.tf similarity index 100% rename from blueprints/nginx/vhost/provider.tf rename to blueprints/nginx/provider.tf diff --git a/blueprints/nginx/reverseproxy/do/Dockerfile b/blueprints/nginx/reverseproxy/do/Dockerfile deleted file mode 100644 index b45f9642..00000000 --- a/blueprints/nginx/reverseproxy/do/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM bedrock/blueprint-base - -ADD export.sh /bootstrap/ - -ADD *.tf /bootstrap/ -ADD templates /bootstrap/templates - -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/blueprints/openstack/compute/export.sh b/blueprints/openstack/compute/export.sh new file mode 100755 index 00000000..4d8769c8 --- /dev/null +++ b/blueprints/openstack/compute/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/templates ./ diff --git a/blueprints/nginx/reverseproxy/openstack/provider.tf b/blueprints/openstack/compute/provider.tf similarity index 100% rename from blueprints/nginx/reverseproxy/openstack/provider.tf rename to blueprints/openstack/compute/provider.tf diff --git a/blueprints/openstack/compute/templates/centos7.yml b/blueprints/openstack/compute/templates/centos7.yml new file mode 100644 index 00000000..e5151d35 --- /dev/null +++ b/blueprints/openstack/compute/templates/centos7.yml @@ -0,0 +1,13 @@ +#!/bin/sh +yum install -y epel-release && yum check-update -y +yum install -y curl python chrony nginx python2-certbot-nginx + +printf '\nClientAliveInterval 100\nClientAliveCountMax 0' >> /etc/ssh/sshd_config +service sshd restart + +systemctl start chronyd && systemctl enable chronyd + +# NGINX install +systemctl start nginx && systemctl enable nginx +curl -sS -L -O https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh +API_KEY="${NginxAmplifyKey}" 
AMPLIFY_HOSTNAME="${NginxHostname}" sh ./install.sh -y diff --git a/blueprints/openstack/compute/templates/ubuntu.yml b/blueprints/openstack/compute/templates/ubuntu.yml new file mode 100644 index 00000000..89fd2a5e --- /dev/null +++ b/blueprints/openstack/compute/templates/ubuntu.yml @@ -0,0 +1,42 @@ +#cloud-config +apt: + sources: + certbot: + source: ppa:certbot/certbot + +packages: + - nginx + - unattended-upgrades + - curl + - ntpdate + - python + - python-certbot-nginx + +timezone: Australia/Melbourne + +ntp: + enabled: true + servers: + - 0.au.pool.ntp.org + - 1.au.pool.ntp.org + - 2.au.pool.ntp.org + - 3.au.pool.ntp.org + +write_files: + - content: | + server { + listen 127.0.0.1:80; + server_name 127.0.0.1; + location /nginx_status { + stub_status on; + allow 127.0.0.1; + deny all; + } + } + path: /etc/nginx/conf.d/stub_status.conf + +runcmd: + - export API_KEY="${NginxAmplifyKey}" + - curl -L https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh | bash + - curl -O https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb + - dpkg -i -E ./amazon-cloudwatch-agent.deb diff --git a/blueprints/rancher/README.md b/blueprints/rancher/README.md new file mode 100644 index 00000000..3f7508a1 --- /dev/null +++ b/blueprints/rancher/README.md @@ -0,0 +1,4 @@ +# Rancher Blueprints + +This directory contains blueprints associated with maintaining +Rancher services. 
diff --git a/blueprints/rancher/env/provider.tf b/blueprints/rancher/provider.tf similarity index 100% rename from blueprints/rancher/env/provider.tf rename to blueprints/rancher/provider.tf diff --git a/blueprints/rancher/server/do/Dockerfile b/blueprints/rancher/server/do/Dockerfile deleted file mode 100644 index b45f9642..00000000 --- a/blueprints/rancher/server/do/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM bedrock/blueprint-base - -ADD export.sh /bootstrap/ - -ADD *.tf /bootstrap/ -ADD templates /bootstrap/templates - -ENV TF_VAR_template_path="/bootstrap/templates" diff --git a/blueprints/s3/README.md b/blueprints/s3/README.md new file mode 100644 index 00000000..f9b53c5d --- /dev/null +++ b/blueprints/s3/README.md @@ -0,0 +1,4 @@ +# Simple Storage Service (S3) Blueprints + +This directory contains blueprints associated with maintaining +AWS S3 buckets for object storage. diff --git a/blueprints/storage/bucket/aws/README.md b/blueprints/storage/bucket/aws/README.md deleted file mode 100644 index 8bf2ecbf..00000000 --- a/blueprints/storage/bucket/aws/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# AWS S3 bucket configuration - -Provision an S3 bucket in AWS. 
- -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|:----:|:-----:|:-----:| -| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | -| bucket\_acl | Access control for bucket | string | `private` | no | -| bucket\_name | Name of S3 bucket | string | - | yes | -| object\_expires | Number of days before object expiration | string | `0` | no | -| region | AWS default region | string | - | yes | -| version\_enabled | Enable object versioning | string | `true` | no | - diff --git a/blueprints/storage/bucket/aws/vars.tf b/blueprints/storage/bucket/aws/vars.tf deleted file mode 100644 index 9d657b21..00000000 --- a/blueprints/storage/bucket/aws/vars.tf +++ /dev/null @@ -1,23 +0,0 @@ -variable "bucket_name" { - description = "Name of S3 bucket" -} - -variable "bucket_acl" { - description = "Access control for bucket" - default = "private" -} - -variable "version_enabled" { - description = "Enable object versioning" - default = "true" -} - -variable "object_expires" { - description = "Number of days before object expiration" - default = "0" -} - -variable "restrict_public_access" { - description = "Indicates whether to block public access to this bucket" - default = "true" -} diff --git a/blueprints/terraform/.terraform.lock.hcl b/blueprints/terraform/.terraform.lock.hcl new file mode 100644 index 00000000..29b950ec --- /dev/null +++ b/blueprints/terraform/.terraform.lock.hcl @@ -0,0 +1,39 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "3.33.0" + hashes = [ + "h1:4+2CO4Pb3BKwI0MD+DBmnX5GFsYTs47y6w4/kQbPJIQ=", + "h1:UJcZV5+xJmHHDCsm+s8+xMonccZvVD0jdGwHAoi7nJg=", + "zh:0e89b10323a59de9dd6f286423cc172cb1733683d654c886493c3bd4e43e6290", + "zh:288df55f0f4fac1e920cfa61616ac42a4e4414bd7a637902db03d0c7101f14ca", + "zh:303c9136c5bf97e6c1deda6e27f0d0931fe0eaaab547bf219b996623fb0ad522", + "zh:457a5da9f323e2781942df534153d000ea81727798ee0771177009d84b04aad7", + "zh:857fa3e29cc25ace76556a5edfded41628a3380cebf457e627576a83084852f8", + "zh:85e1eb383372f834630fac7b02ec9ae1e33d24d61cf5a7d832583a16e6b5add4", + "zh:9dd01eb05ac73146ac5f25421b7683fe4bffec23e408162887e1265f9bfe8462", + "zh:b1561e1335754ec93a54f45c18dc1cab70f38bc08adf244d793791134f5641ef", + "zh:bb96f57b80e3d94ee4bc05a5450fdd796424272b46cfc67ff9d094d5316c5fac", + "zh:e4ce241d8b5dd1124dc0f1da6c0840ab777de8717dac6e76afbbad9883f5ce34", + "zh:f2b292e813844d6d611db89017fc420ac05f2e3b25324e3c893481d375e23396", + ] +} + +provider "registry.terraform.io/hashicorp/tfe" { + version = "0.24.0" + hashes = [ + "h1:/xAhOM8mvJSd9WHTFSlOLjy+gh5W+ziYOKVcJkYWnQE=", + "h1:C1EammzxSTW3uUt/y27Mq6le0PMiFyQtoM//C3y7aL8=", + "zh:09cfec59a8da3af5fa7b546d13c4319f56edf3e00d33f68f31d3530433319ac3", + "zh:1235e6d9d8dbbfed8683e729b9df47d112fdb92fef27886fcd5bc58623feaeae", + "zh:1d8cd1cefb3fd0ce7adffd84683da280872187f1ae2f812038e6476351426452", + "zh:36cdc507ce4ba8369ea39027e15d88dff31a24c0d696ee8831cc4f3313fc417c", + "zh:50ad19d5c61bf9de5fb67765ab5b52d0355e17c6ee81fa960e666a311b470305", + "zh:50ff37d019d3ef904cc96128babcd2d0a2c56680ea8e564a67bcf330f169580d", + "zh:62e7f8aff523583b23dd506ecb7a5c44850a7c7cbf5577e510453dfe1fde2766", + "zh:a6b5cfa422f187bd2bd9f14d9c48fa18e5b118d9045c7fb2a95c0ab156b5def1", + "zh:d59267721e6695f61fcc71924acc6e960643f7d24e9d69be7cc4f43fba73e1dc", + "zh:f9a92a63337e4f02bec8d9dcd92431c4c51025b74b513af56ac5ebdaadcf6df5", + ] +} diff --git a/blueprints/terraform/Makefile 
b/blueprints/terraform/Makefile new file mode 100644 index 00000000..3232bc69 --- /dev/null +++ b/blueprints/terraform/Makefile @@ -0,0 +1,68 @@ +SHELL:=/bin/bash +AWS_DEFAULT_REGION?=ap-southeast-2 +AWS_ACCOUNT=`aws sts get-caller-identity | jq -r '.Account'` +TF_WORKSPACE=$(AWS_ACCOUNT) + +ifneq (, $(shell which docker)) +TERRAFORM_VERSION=0.14.7 +TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}/.ssh:/root/.ssh" -v "${HOME}/.aws:/root/.aws" -v "${HOME}/.terraformrc:/root/.terraformrc" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work hashicorp/terraform:$(TERRAFORM_VERSION) +else +TERRAFORM=terraform +endif + +TERRAFORM_DOCS=docker run --rm -v "${PWD}:/work" tmknom/terraform-docs + +CHECKOV=docker run --rm -v "${PWD}:/work" bridgecrew/checkov + +TFSEC=docker run --rm -v "${PWD}:/work" liamg/tfsec + +DIAGRAMS=docker run -v "${PWD}:/work" figurate/diagrams python + +.PHONY: all clean init validate test docs format list new select plan apply + +WORKSPACE=$(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + +all: test docs format + +clean: + rm -rf .terraform/ + +init: + $(TERRAFORM) init -upgrade + +validate: init + $(TERRAFORM) validate + +test: validate + $(CHECKOV) -d /work + + $(TFSEC) /work + +diagram: + $(DIAGRAMS) diagram.py + +docs: diagram + $(TERRAFORM_DOCS) markdown ./ >./README.md + +format: + $(TERRAFORM) fmt -list=true ./ && \ + $(TERRAFORM) fmt -list=true ./organization && \ + $(TERRAFORM) fmt -list=true ./s3-remote-state + +list: + $(TERRAFORM) workspace list + +new: + $(TERRAFORM) workspace new $(WORKSPACE) + +select: + $(TERRAFORM) workspace select $(WORKSPACE) + +workspace: + $(TERRAFORM) workspace show + +plan: + $(TERRAFORM) plan -input=false -var-file=$(WORKSPACE).tfvars + +apply: + $(TERRAFORM) apply -input=false -var-file=$(WORKSPACE).tfvars $(TF_ARGS) \ No newline at end of file diff --git a/blueprints/terraform/README.md b/blueprints/terraform/README.md new file mode 100644 index 
00000000..8ff01eee --- /dev/null +++ b/blueprints/terraform/README.md @@ -0,0 +1,24 @@ +![Terraform](terraform.png) + +## Requirements + +No requirements. + +## Providers + +No provider. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| admin | Email address of the organization admin | `any` | `null` | no | +| backend\_type | Type of Terraform backend to provision | `string` | `"s3"` | no | +| membership | A list of organization team member email addresses | `list(string)` | `[]` | no | +| mfa\_delete | Enable MFA delete for versioned objects | `bool` | `false` | no | +| name | Name of the organization | `any` | `null` | no | + +## Outputs + +No output. + diff --git a/blueprints/terraform/diagram.py b/blueprints/terraform/diagram.py new file mode 100644 index 00000000..b004a7a6 --- /dev/null +++ b/blueprints/terraform/diagram.py @@ -0,0 +1,7 @@ +from diagrams import Diagram +from diagrams.onprem.iac import Terraform +from diagrams.aws.storage import S3 + +with Diagram("Terraform", show=False, direction="TB"): + Terraform("organization") + S3("terraform_state") diff --git a/blueprints/terraform/main.tf b/blueprints/terraform/main.tf new file mode 100644 index 00000000..97aad954 --- /dev/null +++ b/blueprints/terraform/main.tf @@ -0,0 +1,18 @@ +/* + * ![Terraform](terraform.png) + */ +module "s3_remote_state" { + source = "./s3-remote-state" + count = var.backend_type == "s3" ? 1 : 0 + + mfa_delete = var.mfa_delete +} + +module "tfe_organization" { + source = "./organization" + count = var.backend_type == "tfe" ? 
1 : 0 + + name = var.name + admin = var.admin + membership = var.membership +} diff --git a/blueprints/terraform/organization/main.tf b/blueprints/terraform/organization/main.tf new file mode 100644 index 00000000..37cbd405 --- /dev/null +++ b/blueprints/terraform/organization/main.tf @@ -0,0 +1,7 @@ +module "organization" { + source = "figurate/organization/tfe" + + name = var.name + admin = var.admin + collaborator_auth_policy = "two_factor_mandatory" +} diff --git a/blueprints/tfe/organization/vars.tf b/blueprints/terraform/organization/vars.tf similarity index 100% rename from blueprints/tfe/organization/vars.tf rename to blueprints/terraform/organization/vars.tf diff --git a/blueprints/terraform/provider.tf b/blueprints/terraform/provider.tf new file mode 100644 index 00000000..b21d3b6a --- /dev/null +++ b/blueprints/terraform/provider.tf @@ -0,0 +1 @@ +provider "aws" {} diff --git a/blueprints/terraform/s3-remote-state/main.tf b/blueprints/terraform/s3-remote-state/main.tf new file mode 100644 index 00000000..fb745357 --- /dev/null +++ b/blueprints/terraform/s3-remote-state/main.tf @@ -0,0 +1,23 @@ +data "aws_caller_identity" "current" {} + +module "tf_state" { + source = "figurate/s3-bucket/aws" + + bucket = "${data.aws_caller_identity.current.account_id}-terraform-state" + versioned = true + encrypted = true + mfa_delete = var.mfa_delete +} + +resource "aws_dynamodb_table" "tf_lock" { + hash_key = "LockID" + name = "terraform-lock" + attribute { + name = "LockID" + type = "S" + } + point_in_time_recovery { + enabled = true + } + billing_mode = "PAY_PER_REQUEST" +} diff --git a/blueprints/terraform/s3-remote-state/vars.tf b/blueprints/terraform/s3-remote-state/vars.tf new file mode 100644 index 00000000..dbf7a645 --- /dev/null +++ b/blueprints/terraform/s3-remote-state/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_delete" { + description = "Enable MFA delete for versioned objects" + default = true +} diff --git a/blueprints/terraform/terraform.png 
b/blueprints/terraform/terraform.png new file mode 100644 index 00000000..f2031e0e Binary files /dev/null and b/blueprints/terraform/terraform.png differ diff --git a/blueprints/terraform/vars.tf b/blueprints/terraform/vars.tf new file mode 100644 index 00000000..abc08793 --- /dev/null +++ b/blueprints/terraform/vars.tf @@ -0,0 +1,25 @@ +variable "backend_type" { + description = "Type of Terraform backend to provision" + default = "s3" +} + +variable "name" { + description = "Name of the organization" + default = null +} + +variable "admin" { + description = "Email address of the organization admin" + default = null +} + +variable "membership" { + description = "A list of organization team member email addresses" + type = list(string) + default = [] +} + +variable "mfa_delete" { + description = "Enable MFA delete for versioned objects" + default = false +} diff --git a/blueprints/tfe/organization/Makefile b/blueprints/tfe/organization/Makefile deleted file mode 100644 index f0b616db..00000000 --- a/blueprints/tfe/organization/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -SHELL:=/bin/bash -TERRAFORM_IMAGE=bedrock/tfe-organization -TERRAFORM_VERSION=latest -TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}:/root" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work $(TERRAFORM_IMAGE):$(TERRAFORM_VERSION) - -.PHONY: all clean test docs format list new select plan - -all: test docs format - -clean: - rm -rf .terraform/ - -init: - $(TERRAFORM) init -upgrade=true -backend-config=backend.tfvars - -test: init - $(TERRAFORM) validate - -docs: - docker run --rm -v "${PWD}:/work" tmknom/terraform-docs markdown ./ >./README.md - -format: - $(TERRAFORM) fmt -list=true ./ - -list: - $(TERRAFORM) workspace list - -new: - $(TERRAFORM) workspace new $(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS)) - -select: - $(TERRAFORM) workspace select $(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS)) - -workspace: - $(TERRAFORM) workspace 
show - -plan: init - $(TERRAFORM) plan -input=false - -apply: init - $(TERRAFORM) apply -input=false $(TF_ARGS) \ No newline at end of file diff --git a/digitalocean-droplet/.terraform.lock.hcl b/digitalocean-droplet/.terraform.lock.hcl new file mode 100644 index 00000000..37e7e475 --- /dev/null +++ b/digitalocean-droplet/.terraform.lock.hcl @@ -0,0 +1,41 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/digitalocean/digitalocean" { + version = "2.6.0" + constraints = "2.6.0" + hashes = [ + "h1:/voJ6wIK/G8S/E7zLPvgweKMYGTC0ZcP45ei2gf/IFk=", + "zh:088c2a4eb9579947d50d8bcd722e75f2f1839acae302c8d43133b1da9926dae3", + "zh:323ba833d011371ca6d953752b133c0acad6462176cd2f804077a5f9d892cd2e", + "zh:3fbc64f1fabe57b6df49511c0d8753f1bbf776d5824ba060a51961d2a4265097", + "zh:4c90a933e23288ee2db2228e4e30055882d91bed831c2191cbecd849b27e44cb", + "zh:62f1cf4c82e5fcaf1a17e39cb96638f006b303758813a6c5ecb08bc93cd93364", + "zh:68ad1354e9f925477dc41e658e84a4996ba662920bbc61a2680235b94811169b", + "zh:9119b573c59429c2dfacb7d95b39c4e021783b8281ecd68f1621ad4a17c112cd", + "zh:9c15e3660f2399c25ee3ad53bd54927a6529d1393a54f1e1c2a523e0369dea46", + "zh:bc88f68bf6a6b5e803734f06731e31d61a5977ed1a638bfe102a54094c4d4030", + "zh:c2b013a5d7e60b31211b0f8c0dd898840b8f1aa7225318da05def33b5edb9388", + "zh:e46e21f6ffa7aac11ade8ab4b87a28ac405ef40a35793cef1f1fd6db6d8e5a0a", + "zh:e879643369e03abc192fbcf7ab06611bb8f36d37ceb5641ba05d58869f10ab7c", + "zh:ee9b56400e545ce1805842b795179a004313b8a947bd8f3490f5c5a0cb7703e5", + "zh:fb44861ae0b58b594aa4e565e0ed06bce939753b14a20b4abd3e8276e839e7a7", + ] +} + +provider "registry.terraform.io/hashicorp/template" { + version = "2.2.0" + hashes = [ + "h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=", + "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386", + "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53", + 
"zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603", + "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16", + "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776", + "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451", + "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae", + "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde", + "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d", + "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2", + ] +} diff --git a/digitalocean-droplet/Makefile b/digitalocean-droplet/Makefile new file mode 100644 index 00000000..6c8bc7b8 --- /dev/null +++ b/digitalocean-droplet/Makefile @@ -0,0 +1,64 @@ +SHELL:=/bin/bash +TF_WORKSPACE?=micronode + +ifneq (, $(shell which docker)) +TERRAFORM_VERSION=0.14.7 +TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}/.ssh:/root/.ssh" -v "${HOME}/.aws:/root/.aws" -v "${HOME}/.terraformrc:/root/.terraformrc" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work hashicorp/terraform:$(TERRAFORM_VERSION) +else +TERRAFORM=terraform +endif + +TERRAFORM_DOCS=docker run --rm -v "${PWD}:/work" tmknom/terraform-docs + +CHECKOV=docker run --rm -v "${PWD}:/work" bridgecrew/checkov + +TFSEC=docker run --rm -v "${PWD}:/work" liamg/tfsec + +DIAGRAMS=docker run -v "${PWD}:/work" figurate/diagrams python + +.PHONY: all clean init validate test docs format list new select plan apply + +WORKSPACE=$(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + +all: test docs format + +clean: + rm -rf .terraform/ + +init: + $(TERRAFORM) init -upgrade + +validate: init + $(TERRAFORM) validate + +test: validate + $(CHECKOV) -d /work + + $(TFSEC) /work + +diagram: + $(DIAGRAMS) diagram.py + +docs: diagram + $(TERRAFORM_DOCS) markdown ./ >./README.md + +format: + $(TERRAFORM) fmt -list=true ./ + +list: + $(TERRAFORM) workspace list + +new: + 
$(TERRAFORM) workspace new $(WORKSPACE) + +select: + $(TERRAFORM) workspace select $(WORKSPACE) + +workspace: + $(TERRAFORM) workspace show + +plan: + $(TERRAFORM) plan -input=false -var-file=$(TF_WORKSPACE).tfvars + +apply: + $(TERRAFORM) apply -input=false -var-file=$(TF_WORKSPACE).tfvars $(TF_ARGS) \ No newline at end of file diff --git a/digitalocean-droplet/README.md b/digitalocean-droplet/README.md new file mode 100644 index 00000000..3c6c0312 --- /dev/null +++ b/digitalocean-droplet/README.md @@ -0,0 +1,20 @@ +## Requirements + +| Name | Version | +|------|---------| +| digitalocean | 2.6.0 | + +## Providers + +No provider. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| droplets | A list of droplet configs | `list(tuple([string, string, string]))` | `[]` | no | + +## Outputs + +No output. + diff --git a/digitalocean-droplet/backend.tf b/digitalocean-droplet/backend.tf new file mode 100644 index 00000000..648827a4 --- /dev/null +++ b/digitalocean-droplet/backend.tf @@ -0,0 +1,8 @@ +terraform { + backend "remote" { + organization = "micronode" + workspaces { + prefix = "do-droplet-" + } + } +} diff --git a/digitalocean-droplet/bastion/README.md b/digitalocean-droplet/bastion/README.md new file mode 100644 index 00000000..90c06b45 --- /dev/null +++ b/digitalocean-droplet/bastion/README.md @@ -0,0 +1,28 @@ +# Digital Ocean Bastion host configuration + +Provision a droplet with SSH ingress authenticated with the specified public key. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| bastion\_image | Digital Ocean image for bastion droplet | string | `ubuntu-18-04-x64` | no | +| bastion\_user | Username for bastion SSH user | string | - | yes | +| do\_region | Digital Ocean region | string | - | yes | +| do\_token | Digital Ocean API token | string | - | yes | +| enabled | Start/stop the bastion host | string | - | yes | +| image\_os | The operating system installed on the selected droplet. Valid values are:

* ubuntu = Ubuntu | string | `ubuntu` | no | +| shutdown\_delay | Number of minutes before the host will automatically shutdown | string | `60` | no | +| ssh\_key | Identifier of public key file for SSH access to droplets | string | - | yes | +| ssh\_private\_key | Location of private key file for SSH access to droplets | string | `~/.ssh/id_rsa` | no | +| userdata\_path | The root path to userdata templates | string | `userdata` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| bastion\_ip | IP address for bastion droplet | +| enabled | Start/stop the bastion host | +| monthly\_cost | Monthly cost for bastion droplet | +| ssh\_key | Name of key for SSH access to droplets | + diff --git a/digitalocean-droplet/bastion/export.sh b/digitalocean-droplet/bastion/export.sh new file mode 100755 index 00000000..4d8769c8 --- /dev/null +++ b/digitalocean-droplet/bastion/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/templates ./ diff --git a/digitalocean-droplet/bastion/provider.tf b/digitalocean-droplet/bastion/provider.tf new file mode 100644 index 00000000..f72d4eb1 --- /dev/null +++ b/digitalocean-droplet/bastion/provider.tf @@ -0,0 +1,6 @@ +provider "digitalocean" { +} + +provider "null" { + version = ">= 2.1.0" +} diff --git a/digitalocean-droplet/bastion/templates/motd.yml b/digitalocean-droplet/bastion/templates/motd.yml new file mode 100644 index 00000000..38c01889 --- /dev/null +++ b/digitalocean-droplet/bastion/templates/motd.yml @@ -0,0 +1,3 @@ +UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED + +You must have explicit, authorized permission to access or configure this device. Unauthorized attempts and actions to access or use this system may result in civil and/or criminal penalties. All activities performed on this device are logged and monitored. 
diff --git a/digitalocean-droplet/bastion/templates/ubuntu.yml b/digitalocean-droplet/bastion/templates/ubuntu.yml new file mode 100644 index 00000000..97cd069d --- /dev/null +++ b/digitalocean-droplet/bastion/templates/ubuntu.yml @@ -0,0 +1,50 @@ +#cloud-config +packages: + - fail2ban + - unattended-upgrades + - ntpdate + +timezone: Australia/Melbourne + +ntp: + enabled: true + servers: + - 0.au.pool.ntp.org + - 1.au.pool.ntp.org + - 2.au.pool.ntp.org + - 3.au.pool.ntp.org + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + +write_files: + - path: /etc/update-motd.d/99-unauthorised-access + permissions: "0755" + owner: "root" + content: | + echo 'UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED + + You must have explicit, authorized permission to access or configure this device. Unauthorized attempts and + actions to access or use this system may result in civil and/or criminal penalties. All activities performed on + this device are logged and monitored.' 
+ +runcmd: + # SSH + - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin no/' /etc/ssh/sshd_config +# - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication no/' /etc/ssh/sshd_config + - "printf '\nClientAliveInterval 100\nClientAliveCountMax 0' >> /etc/ssh/sshd_config" + - service ssh restart + # Papertrail + - "wget --header='X-Papertrail-Token: QHS89ESNb9Q0OGPK9Hu2' https://papertrailapp.com/destinations/2465304/setup.sh" + - bash setup.sh + +power_state: + timeout: 120 + delay: "${ShutdownDelay}" + message: System will shutdown in ${ShutdownDelay} minutes + mode: poweroff diff --git a/digitalocean-droplet/diagram.py b/digitalocean-droplet/diagram.py new file mode 100644 index 00000000..c275703a --- /dev/null +++ b/digitalocean-droplet/diagram.py @@ -0,0 +1,7 @@ +from diagrams import Diagram +from diagrams.onprem.iac import Terraform +from diagrams.aws.storage import S3 + +with Diagram("Digital Ocean Droplets", show=False, direction="TB"): + Terraform("organization") + S3("terraform_state") diff --git a/digitalocean-droplet/digital_ocean_droplets.png b/digitalocean-droplet/digital_ocean_droplets.png new file mode 100644 index 00000000..c5ffba56 Binary files /dev/null and b/digitalocean-droplet/digital_ocean_droplets.png differ diff --git a/digitalocean-droplet/docker/export.sh b/digitalocean-droplet/docker/export.sh new file mode 100755 index 00000000..4d8769c8 --- /dev/null +++ b/digitalocean-droplet/docker/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/templates ./ diff --git a/digitalocean-droplet/docker/provider.tf b/digitalocean-droplet/docker/provider.tf new file mode 100644 index 00000000..8900ecf5 --- /dev/null +++ b/digitalocean-droplet/docker/provider.tf @@ -0,0 +1,7 @@ +provider "digitalocean" { + version = "~> 1.2" +} + +provider "null" { + version = ">= 
2.1.0" +} diff --git a/digitalocean-droplet/docker/templates/ubuntu.yml b/digitalocean-droplet/docker/templates/ubuntu.yml new file mode 100644 index 00000000..06d2e55c --- /dev/null +++ b/digitalocean-droplet/docker/templates/ubuntu.yml @@ -0,0 +1,64 @@ +#cloud-config +packages: + - apt-transport-https + - ca-certificates + - gnupg-agent + - software-properties-common + - unattended-upgrades + - curl + - ntpdate + - python-pip + - zip + - openjdk-11-jdk + +timezone: Australia/Melbourne + +ntp: + enabled: true + servers: + - 0.au.pool.ntp.org + - 1.au.pool.ntp.org + - 2.au.pool.ntp.org + - 3.au.pool.ntp.org + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + +#groups: +# - docker: ['${AuthorizedUserName}'] + +write_files: + - path: /etc/update-motd.d/99-unauthorised-access + permissions: "0755" + owner: "root" + content: | + echo 'UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED + + You must have explicit, authorized permission to access or configure this device. Unauthorized attempts and + actions to access or use this system may result in civil and/or criminal penalties. All activities performed on + this device are logged and monitored.' 
+ +runcmd: + # SSH + - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin no/' /etc/ssh/sshd_config + - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication no/' /etc/ssh/sshd_config + - restart ssh + # Docker + - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + - add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + - apt-get update && apt-get install -y docker-ce docker-ce-cli containerd.io + - usermod -aG docker ${AuthorizedUserName} + - pip install docker-compose + # sdkman + - curl -s "https://get.sdkman.io" | bash + # Papertrail + - "wget --header='X-Papertrail-Token: QHS89ESNb9Q0OGPK9Hu2' https://papertrailapp.com/destinations/2465304/setup.sh" + - bash setup.sh + - curl -O https://github.com/papertrail/remote_syslog2/releases/download/v0.20/remote-syslog2_0.20_amd64.deb + - dpkg --install remote-syslog2_0.20_amd64.deb + - remote_syslog diff --git a/digitalocean-droplet/main.tf b/digitalocean-droplet/main.tf new file mode 100644 index 00000000..52d552d2 --- /dev/null +++ b/digitalocean-droplet/main.tf @@ -0,0 +1,8 @@ +module "droplet" { + source = "figurate/droplet/digitalocean" + count = length(var.droplets) + + name = var.droplets[count.index][0] + region = var.droplets[count.index][1] + template_type = var.droplets[count.index][2] +} diff --git a/digitalocean-droplet/networking/.terraform.lock.hcl b/digitalocean-droplet/networking/.terraform.lock.hcl new file mode 100644 index 00000000..0b254ad0 --- /dev/null +++ b/digitalocean-droplet/networking/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/digitalocean/digitalocean" { + version = "2.6.0" + constraints = "2.6.0" + hashes = [ + "h1:/voJ6wIK/G8S/E7zLPvgweKMYGTC0ZcP45ei2gf/IFk=", + "h1:P1C7e6RlhLpi6KuE/sMruDdM5zZisJwMuKGbnxg8tAw=", + "zh:088c2a4eb9579947d50d8bcd722e75f2f1839acae302c8d43133b1da9926dae3", + "zh:323ba833d011371ca6d953752b133c0acad6462176cd2f804077a5f9d892cd2e", + "zh:3fbc64f1fabe57b6df49511c0d8753f1bbf776d5824ba060a51961d2a4265097", + "zh:4c90a933e23288ee2db2228e4e30055882d91bed831c2191cbecd849b27e44cb", + "zh:62f1cf4c82e5fcaf1a17e39cb96638f006b303758813a6c5ecb08bc93cd93364", + "zh:68ad1354e9f925477dc41e658e84a4996ba662920bbc61a2680235b94811169b", + "zh:9119b573c59429c2dfacb7d95b39c4e021783b8281ecd68f1621ad4a17c112cd", + "zh:9c15e3660f2399c25ee3ad53bd54927a6529d1393a54f1e1c2a523e0369dea46", + "zh:bc88f68bf6a6b5e803734f06731e31d61a5977ed1a638bfe102a54094c4d4030", + "zh:c2b013a5d7e60b31211b0f8c0dd898840b8f1aa7225318da05def33b5edb9388", + "zh:e46e21f6ffa7aac11ade8ab4b87a28ac405ef40a35793cef1f1fd6db6d8e5a0a", + "zh:e879643369e03abc192fbcf7ab06611bb8f36d37ceb5641ba05d58869f10ab7c", + "zh:ee9b56400e545ce1805842b795179a004313b8a947bd8f3490f5c5a0cb7703e5", + "zh:fb44861ae0b58b594aa4e565e0ed06bce939753b14a20b4abd3e8276e839e7a7", + ] +} diff --git a/digitalocean-droplet/networking/Makefile b/digitalocean-droplet/networking/Makefile new file mode 100644 index 00000000..6c8bc7b8 --- /dev/null +++ b/digitalocean-droplet/networking/Makefile @@ -0,0 +1,64 @@ +SHELL:=/bin/bash +TF_WORKSPACE?=micronode + +ifneq (, $(shell which docker)) +TERRAFORM_VERSION=0.14.7 +TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}/.ssh:/root/.ssh" -v "${HOME}/.aws:/root/.aws" -v "${HOME}/.terraformrc:/root/.terraformrc" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work hashicorp/terraform:$(TERRAFORM_VERSION) +else +TERRAFORM=terraform +endif + +TERRAFORM_DOCS=docker run --rm -v "${PWD}:/work" tmknom/terraform-docs + +CHECKOV=docker run --rm -v 
"${PWD}:/work" bridgecrew/checkov + +TFSEC=docker run --rm -v "${PWD}:/work" liamg/tfsec + +DIAGRAMS=docker run -v "${PWD}:/work" figurate/diagrams python + +.PHONY: all clean init validate test docs format list new select plan apply + +WORKSPACE=$(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + +all: test docs format + +clean: + rm -rf .terraform/ + +init: + $(TERRAFORM) init -upgrade + +validate: init + $(TERRAFORM) validate + +test: validate + $(CHECKOV) -d /work + + $(TFSEC) /work + +diagram: + $(DIAGRAMS) diagram.py + +docs: diagram + $(TERRAFORM_DOCS) markdown ./ >./README.md + +format: + $(TERRAFORM) fmt -list=true ./ + +list: + $(TERRAFORM) workspace list + +new: + $(TERRAFORM) workspace new $(WORKSPACE) + +select: + $(TERRAFORM) workspace select $(WORKSPACE) + +workspace: + $(TERRAFORM) workspace show + +plan: + $(TERRAFORM) plan -input=false -var-file=$(TF_WORKSPACE).tfvars + +apply: + $(TERRAFORM) apply -input=false -var-file=$(TF_WORKSPACE).tfvars $(TF_ARGS) \ No newline at end of file diff --git a/digitalocean-droplet/networking/backend.tf b/digitalocean-droplet/networking/backend.tf new file mode 100644 index 00000000..3198f1b2 --- /dev/null +++ b/digitalocean-droplet/networking/backend.tf @@ -0,0 +1,8 @@ +terraform { + backend "remote" { + organization="micronode" + workspaces { + prefix = "do-networking-" + } + } +} diff --git a/digitalocean-droplet/networking/diagram.py b/digitalocean-droplet/networking/diagram.py new file mode 100644 index 00000000..fa440848 --- /dev/null +++ b/digitalocean-droplet/networking/diagram.py @@ -0,0 +1,7 @@ +from diagrams import Diagram +from diagrams.onprem.iac import Terraform +from diagrams.aws.storage import S3 + +with Diagram("Digital Ocean Networking", show=False, direction="TB"): + Terraform("organization") + S3("terraform_state") diff --git a/digitalocean-droplet/networking/domain/main.tf b/digitalocean-droplet/networking/domain/main.tf new file mode 100644 index 00000000..f197ab1a --- /dev/null +++ 
b/digitalocean-droplet/networking/domain/main.tf @@ -0,0 +1,21 @@ +/** + * Creates a DNS root domain. + */ +resource "digitalocean_floating_ip" apex_target { + count = var.apex_target == "" ? 1 : 0 + region = var.do_region +} + +resource "digitalocean_domain" domain { + name = var.apex_domain + ip_address = var.apex_target != "" ? var.apex_target : digitalocean_floating_ip.apex_target[0].ip_address +} + +resource "digitalocean_record" record { + count = length(var.aliases) + domain = digitalocean_domain.domain.name + name = var.aliases[count.index] + type = "CNAME" + ttl = var.record_ttl + value = "${digitalocean_domain.domain.name}." +} diff --git a/digitalocean-droplet/networking/domain/output.tf b/digitalocean-droplet/networking/domain/output.tf new file mode 100644 index 00000000..27ee217e --- /dev/null +++ b/digitalocean-droplet/networking/domain/output.tf @@ -0,0 +1,3 @@ +output "floating_ips" { + value = digitalocean_floating_ip.apex_target.*.ip_address +} diff --git a/digitalocean-droplet/networking/firewall.tf b/digitalocean-droplet/networking/firewall.tf new file mode 100644 index 00000000..8c9db06f --- /dev/null +++ b/digitalocean-droplet/networking/firewall.tf @@ -0,0 +1,8 @@ +module "firewall" { + source = "figurate/firewall/digitalocean" + count = length(var.firewalls) + + name = var.firewalls[count.index][0] + ingress_rules = var.firewalls[count.index][1] + egress_rules = var.firewalls[count.index][2] +} diff --git a/digitalocean-droplet/networking/provider.tf b/digitalocean-droplet/networking/provider.tf new file mode 100644 index 00000000..c3ba57e1 --- /dev/null +++ b/digitalocean-droplet/networking/provider.tf @@ -0,0 +1,2 @@ +provider "digitalocean" { +} diff --git a/digitalocean-droplet/networking/vars.tf b/digitalocean-droplet/networking/vars.tf new file mode 100644 index 00000000..5d386eae --- /dev/null +++ b/digitalocean-droplet/networking/vars.tf @@ -0,0 +1,5 @@ +variable "firewalls" { + description = "A list of firewall rule configs" + type 
= list(tuple([string, list(string), list(string)])) + default = [] +} diff --git a/digitalocean-droplet/networking/versions.tf b/digitalocean-droplet/networking/versions.tf new file mode 100644 index 00000000..4514993b --- /dev/null +++ b/digitalocean-droplet/networking/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "2.6.0" + } + } +} diff --git a/digitalocean-droplet/provider.tf b/digitalocean-droplet/provider.tf new file mode 100644 index 00000000..c3ba57e1 --- /dev/null +++ b/digitalocean-droplet/provider.tf @@ -0,0 +1,2 @@ +provider "digitalocean" { +} diff --git a/digitalocean-droplet/rancher/README.md b/digitalocean-droplet/rancher/README.md new file mode 100644 index 00000000..250c7425 --- /dev/null +++ b/digitalocean-droplet/rancher/README.md @@ -0,0 +1,31 @@ +# Digital Ocean Rancher host configuration + +Provision a droplet with Rancher server. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| bastion\_host | Bastion host used to access reverse proxy | string | - | yes | +| do\_region | Digital Ocean region | string | - | yes | +| do\_token | Digital Ocean API token | string | - | yes | +| enabled | Start/stop the rancher server host | string | - | yes | +| environment | Environment identifier for the rancher hosts | string | - | yes | +| hostname | Hostname to configure in virtual host | string | `rancher.mnode.org` | no | +| papertrail\_host | Target URL for Papertrail logs | string | - | yes | +| papertrail\_port | Target port for Papertrail logs | string | - | yes | +| rancher\_image | Digital Ocean image for rancher server droplet | string | `ubuntu-18-04-x64` | no | +| reverseproxy\_host | Host to install vhost configuration | string | - | yes | +| ssh\_key | Identifier of public key file for SSH access to droplets | string | - | yes | +| ssh\_private\_key | Location of private key file for SSH 
access to droplets | string | `~/.ssh/id_rsa` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| do\_region | Digital Ocean region | +| do\_token | Digital Ocean API token | +| enabled | Start/stop the rancher host | +| rancherserver\_ip | - | +| ssh\_key | Location of public key file for SSH access to droplets | + diff --git a/digitalocean-droplet/rancher/export.sh b/digitalocean-droplet/rancher/export.sh new file mode 100755 index 00000000..4d8769c8 --- /dev/null +++ b/digitalocean-droplet/rancher/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/templates ./ diff --git a/digitalocean-droplet/rancher/main.tf b/digitalocean-droplet/rancher/main.tf new file mode 100644 index 00000000..ec20a597 --- /dev/null +++ b/digitalocean-droplet/rancher/main.tf @@ -0,0 +1,50 @@ +/** + * # Digital Ocean Rancher host configuration + * + * Provision a droplet with Rancher server. 
+ */ +data "template_file" "userdata" { + template = file(format("%s/%s.yml", var.template_path, var.image_os)) + vars = { + AuthorizedUserName = var.ssh_user + AuthorizedUserSSHKey = var.ssh_key + PapertrailHost = var.papertrail_host + PapertrailPort = var.papertrail_port + } +} + +resource "digitalocean_tag" "rancherserver" { + name = "rancherserver" +} + +resource "digitalocean_tag" "rancheragent" { + name = "rancheragent" +} + +resource "digitalocean_droplet" "rancherserver" { + count = var.enabled + image = var.rancher_image + name = "${var.environment}-rancherserver.${var.apex_domain}" + region = var.do_region + size = "s-1vcpu-2gb" + private_networking = true + monitoring = true + // volume_ids = ["${digitalocean_volume.rancher_data.id}"] + tags = [ + digitalocean_tag.rancherserver.name] + // ssh_keys = ["${var.ssh_key}"] + // depends_on = ["digitalocean_volume.rancher_data"] + user_data = data.template_file.userdata.rendered +} + +//resource "digitalocean_volume" "rancher_data" { +// name = "rancher-data.${var.environment}" +// region = "${var.do_region}" +// size = 50 +//} + +resource "digitalocean_floating_ip_assignment" "rancherserver" { + count = length(var.floatingip_addresses) + droplet_id = digitalocean_droplet.rancherserver[count.index].id + ip_address = var.floatingip_addresses[count.index] +} diff --git a/digitalocean-droplet/rancher/networking.tf b/digitalocean-droplet/rancher/networking.tf new file mode 100644 index 00000000..24d44eb1 --- /dev/null +++ b/digitalocean-droplet/rancher/networking.tf @@ -0,0 +1,180 @@ +resource "digitalocean_firewall" "default" { + name = "${local.uuid}" + + tags = [ + "${digitalocean_tag.rancherserver.id}", + "${digitalocean_tag.rancheragent.id}", + ] + + inbound_rule { + protocol = "tcp" + port_range = "22" + source_tags = ["bastion"] + } + + outbound_rule = [ + { + protocol = "tcp" + port_range = "80" + destination_addresses = ["0.0.0.0/0", "::/0"] + }, + { + protocol = "tcp" + port_range = "443" + 
destination_addresses = ["0.0.0.0/0", "::/0"] + }, + { + protocol = "tcp" + port_range = "53" + destination_addresses = ["0.0.0.0/0", "::/0"] + }, + { + protocol = "udp" + port_range = "53" + destination_addresses = ["0.0.0.0/0", "::/0"] + }, + { + protocol = "udp" + port_range = "123" + destination_addresses = ["0.0.0.0/0", "::/0"] + }, + ] +} + +resource "digitalocean_firewall" "papertrail" { + name = "${local.uuid}-papertrail" + + tags = [ + "${digitalocean_tag.rancherserver.id}", + "${digitalocean_tag.rancheragent.id}", + ] + + outbound_rule = [ + { + protocol = "tcp" + port_range = "51501" + destination_addresses = ["0.0.0.0/0", "::/0"] + }, + { + protocol = "udp" + port_range = "51501" + destination_addresses = ["0.0.0.0/0", "::/0"] + }, + ] +} + +resource "digitalocean_firewall" "rancherserver" { + name = "${local.uuid}-rancherserver" + + tags = ["${digitalocean_tag.rancherserver.id}"] + + inbound_rule = [ + { + protocol = "udp" + port_range = "4500" + source_tags = ["${digitalocean_tag.rancheragent.id}"] + }, + { + protocol = "udp" + port_range = "500" + source_tags = ["${digitalocean_tag.rancheragent.id}"] + }, + { + protocol = "tcp" + port_range = "8080" + source_tags = ["reverseproxy", "${digitalocean_tag.rancheragent.id}"] + }, + ] + + outbound_rule = [ + { + protocol = "tcp" + port_range = "22" + destination_tags = ["${digitalocean_tag.rancheragent.id}"] + }, + { + protocol = "tcp" + port_range = "2376" + destination_tags = ["${digitalocean_tag.rancheragent.id}"] + }, + { + protocol = "tcp" + port_range = "8080" + destination_tags = ["${digitalocean_tag.rancheragent.id}"] + }, + ] +} + +resource "digitalocean_firewall" "rancheragent" { + name = "${local.uuid}-rancheragent" + + tags = ["${digitalocean_tag.rancheragent.id}"] + + inbound_rule = [ + { + protocol = "tcp" + port_range = "22" + source_tags = ["${digitalocean_tag.rancherserver.id}"] + }, + { + protocol = "tcp" + port_range = "2376" + source_tags = ["${digitalocean_tag.rancherserver.id}"] + }, 
+ { + protocol = "udp" + port_range = "4500" + source_tags = [ + "${digitalocean_tag.rancherserver.id}", + "${digitalocean_tag.rancheragent.id}" + ] + }, + { + protocol = "udp" + port_range = "500" + source_tags = [ + "${digitalocean_tag.rancherserver.id}", + "${digitalocean_tag.rancheragent.id}" + ] + }, + { + protocol = "tcp" + port_range = "443" + source_tags = ["reverseproxy", "${digitalocean_tag.rancherserver.id}"] + }, + { + protocol = "tcp" + port_range = "80" + source_tags = ["reverseproxy", "${digitalocean_tag.rancherserver.id}"] + }, + { + protocol = "tcp" + port_range = "8080-8082" + source_tags = ["reverseproxy", "${digitalocean_tag.rancherserver.id}"] + }, + ] + + outbound_rule = [ + { + protocol = "udp" + port_range = "4500" + destination_tags = [ + "${digitalocean_tag.rancherserver.id}", + "${digitalocean_tag.rancheragent.id}" + ] + }, + { + protocol = "udp" + port_range = "500" + destination_tags = [ + "${digitalocean_tag.rancherserver.id}", + "${digitalocean_tag.rancheragent.id}" + ] + }, + { + protocol = "tcp" + port_range = "8080" + destination_tags = ["${digitalocean_tag.rancherserver.id}"] + }, + ] +} \ No newline at end of file diff --git a/digitalocean-droplet/rancher/outputs.tf b/digitalocean-droplet/rancher/outputs.tf new file mode 100644 index 00000000..7700eb3e --- /dev/null +++ b/digitalocean-droplet/rancher/outputs.tf @@ -0,0 +1,3 @@ +output "rancherserver_ip" { + value = digitalocean_droplet.rancherserver.*.ipv4_address +} diff --git a/digitalocean-droplet/rancher/provider.tf b/digitalocean-droplet/rancher/provider.tf new file mode 100644 index 00000000..f72d4eb1 --- /dev/null +++ b/digitalocean-droplet/rancher/provider.tf @@ -0,0 +1,6 @@ +provider "digitalocean" { +} + +provider "null" { + version = ">= 2.1.0" +} diff --git a/digitalocean-droplet/rancher/templates/ubuntu.yml b/digitalocean-droplet/rancher/templates/ubuntu.yml new file mode 100644 index 00000000..ee79708e --- /dev/null +++ 
b/digitalocean-droplet/rancher/templates/ubuntu.yml @@ -0,0 +1,54 @@ +#cloud-config +bootcmd: + - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + +apt: + sources: + docker: + source: deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable + +packages: + - apt-transport-https + - ca-certificates + - curl + - software-properties-common + - unattended-upgrades + - docker-ce + +timezone: Australia/Melbourne + +ntp: + enabled: true + servers: + - 0.au.pool.ntp.org + - 1.au.pool.ntp.org + - 2.au.pool.ntp.org + - 3.au.pool.ntp.org + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + +write_files: + - path: /etc/log_files.yml + content: | + files: + - /var/log/nginx/access.log + - /var/log/nginx/error.log + destination: + host: ${PapertrailHost} + port: ${PapertrailPort} + protocol: tls + pid_file: /var/run/remote_syslog.pid + +runcmd: + - docker run -d --restart=unless-stopped -p 8080:8080 rancher/server + - "wget --header='X-Papertrail-Token: QHS89ESNb9Q0OGPK9Hu2' https://papertrailapp.com/destinations/2465304/setup.sh" + - bash setup.sh + - curl -O https://github.com/papertrail/remote_syslog2/releases/download/v0.20/remote-syslog2_0.20_amd64.deb + - dpkg --install remote-syslog2_0.20_amd64.deb + - remote_syslog diff --git a/digitalocean-droplet/rancher/vars.tf b/digitalocean-droplet/rancher/vars.tf new file mode 100644 index 00000000..f53b19d3 --- /dev/null +++ b/digitalocean-droplet/rancher/vars.tf @@ -0,0 +1,77 @@ +variable "template_path" { + description = "The root path to userdata templates" + default = "templates" +} + +variable "do_region" { + description = "Digital Ocean region" +} + +variable "ssh_user" { + description = "Username for SSH user" +} + +variable "ssh_key" { + description = "Identifier of public key file for SSH access to droplets" +} + +variable "ssh_private_key" { + description = "Location 
of private key file for SSH access to droplets" +// default = "~/.ssh/id_rsa" +} + +variable "rancher_image" { + description = "Digital Ocean image for rancher server droplet" + default = "ubuntu-18-04-x64" +} + +variable "image_os" { + description = <
* ubuntu = Ubuntu | string | `ubuntu` | no | +| papertrail\_host | Target URL for Papertrail logs | string | - | yes | +| papertrail\_port | Target port for Papertrail logs | string | - | yes | +| reverseproxy\_image | Digital Ocean image for reverseproxy droplet | string | `ubuntu-18-04-x64` | no | +| reverseproxy\_user | Username for reverseproxy SSH user | string | - | yes | +| ssh\_key | Identifier of public key file for SSH access to droplets | string | - | yes | +| upstream\_ports | A list of ports to route upstream traffic | list | `` | no | +| userdata\_path | The root path to userdata templates | string | `userdata` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| do\_region | Digital Ocean region | +| do\_token | Digital Ocean API token | +| enabled | Start/stop the reverseproxy host | +| floating\_ip | Floating IP address for reverseproxy droplet | +| reverseproxy\_ip | IP address for reverseproxy droplet | +| reverseproxy\_ip\_private | Private IP address for reverseproxy droplet | +| ssh\_key | Location of public key file for SSH access to droplets | + diff --git a/digitalocean-droplet/reverseproxy/export.sh b/digitalocean-droplet/reverseproxy/export.sh new file mode 100755 index 00000000..4d8769c8 --- /dev/null +++ b/digitalocean-droplet/reverseproxy/export.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Generate terraform backend config +sh /bootstrap/backend_tf.sh > /bootstrap/backend.tf + +# export terraform files to current directory +cp /bootstrap/*.tf ./ +cp -R /bootstrap/templates ./ diff --git a/digitalocean-droplet/reverseproxy/main.tf b/digitalocean-droplet/reverseproxy/main.tf new file mode 100644 index 00000000..7cdd2ada --- /dev/null +++ b/digitalocean-droplet/reverseproxy/main.tf @@ -0,0 +1,43 @@ +/** + * # Digital Ocean Reverse Proxy host configuration + * + * Provision a droplet with NGINX and letsencrypt installed. 
+ */ +data digitalocean_ssh_key ssh_key { + name = var.ssh_key +} + +data "template_file" "userdata" { + template = file(format("%s/%s.yml", var.template_path, var.image_os)) + vars = { + NginxAmplifyKey = var.amplify_key + NginxHostname = "${var.environment}-reverseproxy" + AuthorizedUserName = var.ssh_user + AuthorizedUserSSHKey = data.digitalocean_ssh_key.ssh_key.public_key + PapertrailHost = var.papertrail_host + PapertrailPort = var.papertrail_port + } +} + +resource "digitalocean_tag" "reverseproxy" { + name = "reverseproxy" +} + +resource "digitalocean_droplet" "reverseproxy" { + count = var.instance_count + image = var.droplet_image + name = "${var.environment}${count.index + 1}-reverseproxy.${var.apex_domain}" + region = var.do_region + size = "s-1vcpu-1gb" + private_networking = true + monitoring = true + tags = [digitalocean_tag.reverseproxy.name] + ssh_keys = [data.digitalocean_ssh_key.ssh_key.fingerprint] + user_data = data.template_file.userdata.rendered +} + +resource "digitalocean_floating_ip_assignment" "reverseproxy" { + count = length(var.floatingip_addresses) + droplet_id = digitalocean_droplet.reverseproxy[count.index].id + ip_address = var.floatingip_addresses[count.index] +} diff --git a/digitalocean-droplet/reverseproxy/networking.tf b/digitalocean-droplet/reverseproxy/networking.tf new file mode 100644 index 00000000..704d4bee --- /dev/null +++ b/digitalocean-droplet/reverseproxy/networking.tf @@ -0,0 +1,87 @@ +resource "digitalocean_firewall" "ssh" { + name = "${local.uuid}-ssh" + tags = [digitalocean_tag.reverseproxy.id] + inbound_rule { + protocol = "tcp" + port_range = "22" + source_tags = ["bastion"] + } +} + +resource "digitalocean_firewall" http { + name = "${local.uuid}-http" + tags = [digitalocean_tag.reverseproxy.id] + inbound_rule { + protocol = "tcp" + port_range = "443" + source_addresses = ["0.0.0.0/0", "::/0"] + } + inbound_rule { + protocol = "tcp" + port_range = "80" + source_addresses = ["0.0.0.0/0", "::/0"] + } + 
outbound_rule { + protocol = "tcp" + port_range = "80" + destination_addresses = ["0.0.0.0/0", "::/0"] + } + outbound_rule { + protocol = "tcp" + port_range = "443" + destination_addresses = ["0.0.0.0/0", "::/0"] + } +} + +resource "digitalocean_firewall" dns { + name = "${local.uuid}-dns" + tags = [digitalocean_tag.reverseproxy.id] + outbound_rule { + protocol = "tcp" + port_range = "53" + destination_addresses = ["0.0.0.0/0", "::/0"] + } + outbound_rule { + protocol = "udp" + port_range = "53" + destination_addresses = ["0.0.0.0/0", "::/0"] + } +} + +resource "digitalocean_firewall" ntp { + name = "${local.uuid}-ntp" + tags = [digitalocean_tag.reverseproxy.id] + outbound_rule { + protocol = "udp" + port_range = "123" + destination_addresses = ["0.0.0.0/0", "::/0"] + } +} + +resource "digitalocean_firewall" "papertrail" { + count = var.papertrail_host != "" ? 1 : 0 + name = "${local.uuid}-papertrail" + tags = [digitalocean_tag.reverseproxy.id] + outbound_rule { + protocol = "tcp" + port_range = var.papertrail_port + destination_addresses = ["0.0.0.0/0", "::/0"] + } + outbound_rule { + protocol = "udp" + port_range = var.papertrail_port + destination_addresses = ["0.0.0.0/0", "::/0"] + } +} + +resource "digitalocean_firewall" "upstream" { + count = length(var.upstream_ports) + name = "${local.uuid}-upstream-${count.index}" + tags = [digitalocean_tag.reverseproxy.id] + outbound_rule { + protocol = "tcp" + port_range = var.upstream_ports[count.index] + destination_tags = var.upstream_tags + destination_addresses = var.upstream_addresses + } +} \ No newline at end of file diff --git a/digitalocean-droplet/reverseproxy/outputs.tf b/digitalocean-droplet/reverseproxy/outputs.tf new file mode 100644 index 00000000..4972b5c3 --- /dev/null +++ b/digitalocean-droplet/reverseproxy/outputs.tf @@ -0,0 +1,9 @@ +output "reverseproxy_ip" { + description = "IP address for reverseproxy droplet" + value = digitalocean_droplet.reverseproxy.*.ipv4_address +} + +output 
"reverseproxy_ip_private" { + description = "Private IP address for reverseproxy droplet" + value = digitalocean_droplet.reverseproxy.*.ipv4_address_private +} diff --git a/digitalocean-droplet/reverseproxy/provider.tf b/digitalocean-droplet/reverseproxy/provider.tf new file mode 100644 index 00000000..74f5d03b --- /dev/null +++ b/digitalocean-droplet/reverseproxy/provider.tf @@ -0,0 +1,3 @@ +provider "digitalocean" { + version = "~> 1.2" +} diff --git a/digitalocean-droplet/reverseproxy/templates/ubuntu.yml b/digitalocean-droplet/reverseproxy/templates/ubuntu.yml new file mode 100644 index 00000000..a5256371 --- /dev/null +++ b/digitalocean-droplet/reverseproxy/templates/ubuntu.yml @@ -0,0 +1,78 @@ +#cloud-config +apt: + sources: + certbot: + source: ppa:certbot/certbot + +packages: + - nginx + - unattended-upgrades + - curl + - ntpdate + - python + - python-certbot-nginx + - awstats + - libgeoip-dev + - build-essential + +timezone: Australia/Melbourne + +ntp: + enabled: true + servers: + - 0.au.pool.ntp.org + - 1.au.pool.ntp.org + - 2.au.pool.ntp.org + - 3.au.pool.ntp.org + +users: + - name: ${AuthorizedUserName} + groups: sudo + shell: /bin/bash + sudo: ['ALL=(ALL) NOPASSWD:ALL'] + ssh-authorized-keys: + - ${AuthorizedUserSSHKey} + + +write_files: + - path: /etc/update-motd.d/99-unauthorised-access + permissions: "0755" + owner: "root" + content: | + echo 'UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED + + You must have explicit, authorized permission to access or configure this device. Unauthorized attempts and + actions to access or use this system may result in civil and/or criminal penalties. All activities performed on + this device are logged and monitored.' 
+ - path: /etc/nginx/conf.d/stub_status.conf + content: | + server { + listen 127.0.0.1:80; + server_name 127.0.0.1; + location /nginx_status { + stub_status on; + allow 127.0.0.1; + deny all; + } + } + - path: /etc/log_files.yml + content: | + files: + - /var/log/nginx/access.log + - /var/log/nginx/error.log + destination: + host: ${PapertrailHost} + port: ${PapertrailPort} + protocol: tls + pid_file: /var/run/remote_syslog.pid + +runcmd: + - export API_KEY="${NginxAmplifyKey}" AMPLIFY_HOSTNAME="${NginxHostname}" + - curl -L https://github.com/nginxinc/nginx-amplify-agent/raw/master/packages/install.sh | bash + # Papertrail setup.. + - "wget --header='X-Papertrail-Token: QHS89ESNb9Q0OGPK9Hu2' https://papertrailapp.com/destinations/2465304/setup.sh" + - bash setup.sh + - curl -O https://github.com/papertrail/remote_syslog2/releases/download/v0.20/remote-syslog2_0.20_amd64.deb + - dpkg --install remote-syslog2_0.20_amd64.deb + - remote_syslog + diff --git a/digitalocean-droplet/reverseproxy/vars.tf b/digitalocean-droplet/reverseproxy/vars.tf new file mode 100644 index 00000000..ef7332a7 --- /dev/null +++ b/digitalocean-droplet/reverseproxy/vars.tf @@ -0,0 +1,87 @@ +variable "template_path" { + description = "The root path to userdata templates" + default = "templates" +} + +variable "do_region" { + description = "Digital Ocean region" +} + +variable "instance_count" { + description = "Number of hosts to provision" + default = 1 +} + +variable "ssh_user" { + description = "Username for reverseproxy SSH user" +} + +variable "ssh_key" { + description = "Identifier of public key file for SSH access to droplets" +} + +variable "droplet_image" { + description = "Digital Ocean image for droplet" + default = "ubuntu-18-04-x64" +} + +variable "image_os" { + description = < $@ + +sinclude $(DEPENDS) + +$(IMAGES): %: + +ifeq (clean,$(filter clean,$(MAKECMDGOALS))) + docker rmi $(REGISTRY)/$@-roles +endif + +ifeq (test,$(filter test,$(MAKECMDGOALS))) + $(TERRAFORM) init 
./$(subst -,/,$@) && \ + $(TERRAFORM) get -update && \ + $(TERRAFORM) validate ./$(subst -,/,$@) +endif + + docker build ${BUILD_ARGS} --build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) --build-arg HTTP_PROXY=${http_proxy} -t $(REGISTRY)/$@-roles --network=host $(subst -,/,$@) + +ifeq (tag,$(filter tag,$(MAKECMDGOALS))) + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker tag $(REGISTRY)/$@ $(REGISTRY)/$@:% +endif + +ifeq (push,$(filter push,$(MAKECMDGOALS))) + echo $(TAGS) | tr "/," "-\n" | xargs -n1 -I % docker push $(REGISTRY)/$@:% +endif + +ifeq (docs,$(filter docs,$(MAKECMDGOALS))) + docker run --rm -v "${PWD}:/work" tmknom/terraform-docs markdown $(subst -,/,$@)/ >$(subst -,/,$@)/README.md +endif + +ifeq (format,$(filter format,$(MAKECMDGOALS))) + $(TERRAFORM) fmt -list=true $(subst -,/,$@)/ +endif diff --git a/blueprints/dns/domain/do/Dockerfile b/incubator/roles/apachesling/aws/Dockerfile similarity index 100% rename from blueprints/dns/domain/do/Dockerfile rename to incubator/roles/apachesling/aws/Dockerfile diff --git a/incubator/roles/apachesling/aws/README.md b/incubator/roles/apachesling/aws/README.md new file mode 100644 index 00000000..c6478372 --- /dev/null +++ b/incubator/roles/apachesling/aws/README.md @@ -0,0 +1,17 @@ +# AWS Sling role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that have the following privileges: + +* Access for managing Sling hosts specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/apachesling/aws/cloudformation.tf b/incubator/roles/apachesling/aws/cloudformation.tf new file mode 100644 index 00000000..3a26fc97 --- /dev/null +++ b/incubator/roles/apachesling/aws/cloudformation.tf @@ -0,0 +1,40 @@ +data "aws_iam_policy_document" "cloudformation_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["cloudformation.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "sling_cloudformation" { + name = "bedrock-apachesling-cloudformation" + assume_role_policy = "${data.aws_iam_policy_document.cloudformation_assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = "${aws_iam_role.sling_cloudformation.name}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.sling_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "ec2_instance_profile_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ec2-instance-profile-fullaccess" + role = "${aws_iam_role.sling_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_log_groups" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-log-groups" + role = "${aws_iam_role.sling_cloudformation.id}" +} + +resource 
"aws_iam_role_policy_attachment" "route53_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = "${aws_iam_role.sling_cloudformation.name}" +} diff --git a/incubator/roles/apachesling/aws/main.tf b/incubator/roles/apachesling/aws/main.tf new file mode 100644 index 00000000..baf0f3b8 --- /dev/null +++ b/incubator/roles/apachesling/aws/main.tf @@ -0,0 +1,53 @@ +/** + * # AWS Sling role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that has the following privileges: + * + * * Access for managing Sling hosts specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "sling_admin" { + name = "bedrock-apachesling-admin" + assume_role_policy = "${data.aws_iam_policy_document.assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" + role = "${aws_iam_role.sling_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = "${aws_iam_role.sling_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess" + role = "${aws_iam_role.sling_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.sling_admin.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_create" { + policy_arn = 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-create" + role = "${aws_iam_role.sling_admin.id}" +} diff --git a/incubator/roles/apachesling/aws/provider.tf b/incubator/roles/apachesling/aws/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/apachesling/aws/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/dns/record/aws/Dockerfile b/incubator/roles/apachesolr/aws/Dockerfile similarity index 100% rename from blueprints/dns/record/aws/Dockerfile rename to incubator/roles/apachesolr/aws/Dockerfile diff --git a/incubator/roles/apachesolr/aws/README.md b/incubator/roles/apachesolr/aws/README.md new file mode 100644 index 00000000..f0ec9d88 --- /dev/null +++ b/incubator/roles/apachesolr/aws/README.md @@ -0,0 +1,17 @@ +# AWS Solr role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that have the following privileges: + +* Access for managing Solr hosts specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/apachesolr/aws/cloudformation.tf b/incubator/roles/apachesolr/aws/cloudformation.tf new file mode 100644 index 00000000..d4924e0b --- /dev/null +++ b/incubator/roles/apachesolr/aws/cloudformation.tf @@ -0,0 +1,40 @@ +data "aws_iam_policy_document" "cloudformation_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["cloudformation.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "solr_cloudformation" { + name = "bedrock-apachesolr-cloudformation" + assume_role_policy = "${data.aws_iam_policy_document.cloudformation_assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = "${aws_iam_role.solr_cloudformation.name}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.solr_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "ec2_instance_profile_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ec2-instance-profile-fullaccess" + role = "${aws_iam_role.solr_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_log_groups" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-log-groups" + role = "${aws_iam_role.solr_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" 
"route53_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = "${aws_iam_role.solr_cloudformation.name}" +} diff --git a/incubator/roles/apachesolr/aws/main.tf b/incubator/roles/apachesolr/aws/main.tf new file mode 100644 index 00000000..990d0dbc --- /dev/null +++ b/incubator/roles/apachesolr/aws/main.tf @@ -0,0 +1,53 @@ +/** + * # AWS Solr role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that has the following privileges: + * + * * Access for managing Solr hosts specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "solr_admin" { + name = "bedrock-apachesolr-admin" + assume_role_policy = "${data.aws_iam_policy_document.assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" + role = "${aws_iam_role.solr_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = "${aws_iam_role.solr_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess" + role = "${aws_iam_role.solr_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.solr_admin.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_create" { + policy_arn = 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-create" + role = "${aws_iam_role.solr_admin.id}" +} diff --git a/incubator/roles/apachesolr/aws/provider.tf b/incubator/roles/apachesolr/aws/provider.tf new file mode 100644 index 00000000..5d2981b5 --- /dev/null +++ b/incubator/roles/apachesolr/aws/provider.tf @@ -0,0 +1,16 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/dns/record/do/Dockerfile b/incubator/roles/aws/chime/Dockerfile similarity index 100% rename from blueprints/dns/record/do/Dockerfile rename to incubator/roles/aws/chime/Dockerfile diff --git a/incubator/roles/aws/chime/README.md b/incubator/roles/aws/chime/README.md new file mode 100644 index 00000000..517c8b76 --- /dev/null +++ b/incubator/roles/aws/chime/README.md @@ -0,0 +1,17 @@ +# AWS EC2 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that have the following privileges: + +* Access for managing Lambda functions specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/chime/blueprint.tf b/incubator/roles/aws/chime/blueprint.tf new file mode 100644 index 00000000..a6fff314 --- /dev/null +++ b/incubator/roles/aws/chime/blueprint.tf @@ -0,0 +1,10 @@ +resource "aws_iam_role" "blueprint" { + name = "chime-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "lambda_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AWSLambdaFullAccess" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/chime/lambda.tf b/incubator/roles/aws/chime/lambda.tf new file mode 100644 index 00000000..b42f5fec --- /dev/null +++ b/incubator/roles/aws/chime/lambda.tf @@ -0,0 +1,25 @@ +data "aws_iam_policy_document" "lambda_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["lambda.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "lambda" { + name = "chime-lambda-role" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "lambda_config" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-lambda-config" + role = aws_iam_role.lambda.id +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_logs" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-logs" + role = aws_iam_role.lambda.id +} diff --git a/incubator/roles/aws/chime/main.tf 
b/incubator/roles/aws/chime/main.tf new file mode 100644 index 00000000..50296984 --- /dev/null +++ b/incubator/roles/aws/chime/main.tf @@ -0,0 +1,23 @@ +/** + * # AWS EC2 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that has the following privileges: + * + * * Access for managing Lambda functions specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} diff --git a/incubator/roles/aws/chime/provider.tf b/incubator/roles/aws/chime/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/chime/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/dns/redirect/aws/Dockerfile b/incubator/roles/aws/cloudfront/Dockerfile similarity index 100% rename from blueprints/dns/redirect/aws/Dockerfile rename to incubator/roles/aws/cloudfront/Dockerfile diff --git a/incubator/roles/aws/cloudfront/README.md b/incubator/roles/aws/cloudfront/README.md new file mode 100644 index 00000000..8d2d9ceb --- /dev/null +++ b/incubator/roles/aws/cloudfront/README.md @@ -0,0 +1,17 @@ +# AWS EC2 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that has the following privileges: + +* Access for managing Cloudfront distributions specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/cloudfront/blueprint.tf b/incubator/roles/aws/cloudfront/blueprint.tf new file mode 100644 index 00000000..92bc09d4 --- /dev/null +++ b/incubator/roles/aws/cloudfront/blueprint.tf @@ -0,0 +1,30 @@ +resource "aws_iam_role" "blueprint" { + name = "cloudfront-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "cloudfront_full_access" { + policy_arn = "arn:aws:iam::aws:policy/CloudFrontFullAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "s3_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "route53_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "lambda_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AWSLambdaReadOnlyAccess" + role = aws_iam_role.blueprint.id +} + +resource "aws_iam_role_policy_attachment" "iam_servicerole_create" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-servicerole-create" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/cloudfront/lambda.tf b/incubator/roles/aws/cloudfront/lambda.tf new file mode 100644 index 00000000..24b727ad --- /dev/null +++ b/incubator/roles/aws/cloudfront/lambda.tf @@ -0,0 +1,20 @@ +data "aws_iam_policy_document" 
"lambda_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["lambda.amazonaws.com", "edgelambda.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "edgelambda" { + name = "cloudfront-edgelambda-role" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_logs" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-logs" + role = aws_iam_role.edgelambda.id +} diff --git a/incubator/roles/aws/cloudfront/main.tf b/incubator/roles/aws/cloudfront/main.tf new file mode 100644 index 00000000..3af97afe --- /dev/null +++ b/incubator/roles/aws/cloudfront/main.tf @@ -0,0 +1,23 @@ +/** + * # AWS EC2 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that has the following privileges: + * + * * Access for managing Cloudfront distributions specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} diff --git a/incubator/roles/aws/cloudfront/provider.tf b/incubator/roles/aws/cloudfront/provider.tf new file mode 100644 index 00000000..5d2981b5 --- /dev/null +++ b/incubator/roles/aws/cloudfront/provider.tf @@ -0,0 +1,16 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git 
a/blueprints/network/private/openstack/Dockerfile b/incubator/roles/aws/cloudwatch/Dockerfile similarity index 100% rename from blueprints/network/private/openstack/Dockerfile rename to incubator/roles/aws/cloudwatch/Dockerfile diff --git a/incubator/roles/aws/cloudwatch/README.md b/incubator/roles/aws/cloudwatch/README.md new file mode 100644 index 00000000..1a5d7632 --- /dev/null +++ b/incubator/roles/aws/cloudwatch/README.md @@ -0,0 +1,18 @@ +# AWS CloudWatch role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create a role that has the following privileges: + +* Access for managing CloudWatch events specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/cloudwatch/main.tf b/incubator/roles/aws/cloudwatch/main.tf new file mode 100644 index 00000000..96304d48 --- /dev/null +++ b/incubator/roles/aws/cloudwatch/main.tf @@ -0,0 +1,39 @@ +/** + * # AWS CloudWatch role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create a role that has the following privileges: + * + * * Access for managing CloudWatch events specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "cloudwatch-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_access" { + policy_arn = "arn:aws:iam::aws:policy/CloudWatchEventsFullAccess" + role = aws_iam_role.blueprint.id +} + +resource "aws_iam_role_policy_attachment" "lambda_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AWSLambdaFullAccess" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/cloudwatch/provider.tf b/incubator/roles/aws/cloudwatch/provider.tf new file mode 100644 index 00000000..5d2981b5 --- /dev/null +++ b/incubator/roles/aws/cloudwatch/provider.tf @@ -0,0 +1,16 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/cloudwatch/vars.tf b/incubator/roles/aws/cloudwatch/vars.tf new file mode 100644 index 00000000..58c9814d --- /dev/null +++ b/incubator/roles/aws/cloudwatch/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_required" { + description = "Indicates whether users assuming this role must have MFA enabled" + default = "true" +} diff --git a/blueprints/storage/block/do/Dockerfile b/incubator/roles/aws/codebuild/Dockerfile similarity index 100% 
rename from blueprints/storage/block/do/Dockerfile rename to incubator/roles/aws/codebuild/Dockerfile diff --git a/incubator/roles/aws/codebuild/README.md b/incubator/roles/aws/codebuild/README.md new file mode 100644 index 00000000..3d8d66b1 --- /dev/null +++ b/incubator/roles/aws/codebuild/README.md @@ -0,0 +1,18 @@ +# AWS S3 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create a role that has the following privileges: + +* Access for managing Codebuild projects specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/codebuild/main.tf b/incubator/roles/aws/codebuild/main.tf new file mode 100644 index 00000000..186fa4b0 --- /dev/null +++ b/incubator/roles/aws/codebuild/main.tf @@ -0,0 +1,74 @@ +/** + * # AWS S3 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create a role that has the following privileges: + * + * * Access for managing Codebuild projects specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "codebuild-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "codebuild_access" { + policy_arn = "arn:aws:iam::aws:policy/AWSCodeBuildAdminAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-passrole" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "kms_poweruser" { + policy_arn = "arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "kms_keymanagement" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-kms-keymanagement" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "kms_encryption" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-kms-encryption" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "ssm_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess" + role = 
aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "ssm_params" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ssm-params" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "s3_readonly_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess" + role = aws_iam_role.blueprint.name +} diff --git a/incubator/roles/aws/codebuild/provider.tf b/incubator/roles/aws/codebuild/provider.tf new file mode 100644 index 00000000..5e0bb998 --- /dev/null +++ b/incubator/roles/aws/codebuild/provider.tf @@ -0,0 +1,15 @@ +provider "aws" { + version = ">= 2.7.0" + region = "${var.region}" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "region" { + description = "AWS default region" +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/storage/file/sync/Dockerfile b/incubator/roles/aws/cognito/Dockerfile similarity index 100% rename from blueprints/storage/file/sync/Dockerfile rename to incubator/roles/aws/cognito/Dockerfile diff --git a/incubator/roles/aws/cognito/README.md b/incubator/roles/aws/cognito/README.md new file mode 100644 index 00000000..bf9fdff9 --- /dev/null +++ b/incubator/roles/aws/cognito/README.md @@ -0,0 +1,18 @@ +# AWS S3 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create a role that has the following privileges: + +* Access for managing Cognito user pools specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/cognito/main.tf b/incubator/roles/aws/cognito/main.tf new file mode 100644 index 00000000..e23bb186 --- /dev/null +++ b/incubator/roles/aws/cognito/main.tf @@ -0,0 +1,34 @@ +/** + * # AWS S3 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create a role that has the following privileges: + * + * * Access for managing Cognito user pools specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "cognito-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "cognito_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonCognitoPowerUser" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/cognito/provider.tf b/incubator/roles/aws/cognito/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/cognito/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + 
role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/swapfile/linux/Dockerfile b/incubator/roles/aws/dynamodb/Dockerfile similarity index 100% rename from blueprints/swapfile/linux/Dockerfile rename to incubator/roles/aws/dynamodb/Dockerfile diff --git a/incubator/roles/aws/dynamodb/README.md b/incubator/roles/aws/dynamodb/README.md new file mode 100644 index 00000000..ba415968 --- /dev/null +++ b/incubator/roles/aws/dynamodb/README.md @@ -0,0 +1,18 @@ +# AWS DynamoDB role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create roles that have the following privileges: + +* Access for managing DynamoDB tables specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/dynamodb/lambda.tf b/incubator/roles/aws/dynamodb/lambda.tf new file mode 100644 index 00000000..b6c88d7b --- /dev/null +++ b/incubator/roles/aws/dynamodb/lambda.tf @@ -0,0 +1,30 @@ +data "aws_iam_policy_document" "lambda_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["lambda.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "lambda" { + name = "dynamodb-lambda-role" + description = "Role assumed by DynamoDB Lambda functions" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "s3_readonly_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess" 
+ role = aws_iam_role.lambda.id +} + +resource "aws_iam_role_policy_attachment" "dynamodb_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-dynamodb-fullaccess" + role = aws_iam_role.lambda.id +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_logs" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-logs" + role = aws_iam_role.lambda.id +} diff --git a/incubator/roles/aws/dynamodb/main.tf b/incubator/roles/aws/dynamodb/main.tf new file mode 100644 index 00000000..d845fb9e --- /dev/null +++ b/incubator/roles/aws/dynamodb/main.tf @@ -0,0 +1,44 @@ +/** + * # AWS DynamoDB role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that has the following privileges: + * + * * Access for managing DynamoDB tables specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "dynamodb-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "dynamodb_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "lambda_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AWSLambdaFullAccess" + role = aws_iam_role.blueprint.id +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-passrole" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/dynamodb/provider.tf b/incubator/roles/aws/dynamodb/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/dynamodb/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/blueprints/tfe/organization/Dockerfile b/incubator/roles/aws/ec2/Dockerfile similarity index 100% rename from blueprints/tfe/organization/Dockerfile rename to incubator/roles/aws/ec2/Dockerfile diff --git a/incubator/roles/aws/ec2/README.md b/incubator/roles/aws/ec2/README.md new file mode 100644 index 00000000..517c8b76 --- /dev/null +++ b/incubator/roles/aws/ec2/README.md @@ -0,0 +1,17 @@ +# AWS EC2 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that have the following privileges: + +* Access for managing Lambda functions specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/ec2/lambda.tf b/incubator/roles/aws/ec2/lambda.tf new file mode 100644 index 00000000..e67e9abf --- /dev/null +++ b/incubator/roles/aws/ec2/lambda.tf @@ -0,0 +1,25 @@ +data "aws_iam_policy_document" "lambda_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["lambda.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "lambda" { + name = "ec2-lambda-role" + description = "Role assumed by EC2 Lambda functions" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "ec2_fullaccess" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = aws_iam_role.lambda.id +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_logs" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-logs" + role = aws_iam_role.lambda.id +} diff --git a/incubator/roles/aws/ec2/main.tf b/incubator/roles/aws/ec2/main.tf new file mode 100644 index 00000000..0a59fee3 --- /dev/null +++ b/incubator/roles/aws/ec2/main.tf @@ -0,0 +1,38 @@ +/** + * # AWS EC2 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create roles that have the following privileges: + * + * * Access for managing Lambda functions specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "ec2-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "lambda_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AWSLambdaFullAccess" + role = aws_iam_role.blueprint.id +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-passrole" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/ec2/provider.tf b/incubator/roles/aws/ec2/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/ec2/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/ecr/Dockerfile b/incubator/roles/aws/ecr/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/ecr/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/ecr/README.md b/incubator/roles/aws/ecr/README.md new file mode 100644 index 00000000..6ca80020 --- /dev/null +++ b/incubator/roles/aws/ecr/README.md @@ -0,0 +1,18 @@ +# AWS ECR role configuration + +Purpose: Provision IAM roles in AWS. 
+ +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create a role that has the following privileges: + +* Access for managing ECR repositories specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/ecr/blueprint.tf b/incubator/roles/aws/ecr/blueprint.tf new file mode 100644 index 00000000..15e365ad --- /dev/null +++ b/incubator/roles/aws/ecr/blueprint.tf @@ -0,0 +1,12 @@ +resource "aws_iam_role" "blueprint" { + name = "ecr-blueprint-role" + description = "Role assumed by Bedrock blueprints" + path = var.role_path + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json + force_detach_policies = true +} + +resource "aws_iam_role_policy_attachment" "ecr_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryFullAccess" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/ecr/main.tf b/incubator/roles/aws/ecr/main.tf new file mode 100644 index 00000000..76eb18f1 --- /dev/null +++ b/incubator/roles/aws/ecr/main.tf @@ -0,0 +1,23 @@ +/** + * # AWS ECR Roles + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create a role that has the following privileges: + * + * * Access for managing ECR repositories specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} diff --git a/incubator/roles/aws/ecr/provider.tf b/incubator/roles/aws/ecr/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/ecr/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/ecr/vars.tf b/incubator/roles/aws/ecr/vars.tf new file mode 100644 index 00000000..fe55c844 --- /dev/null +++ b/incubator/roles/aws/ecr/vars.tf @@ -0,0 +1,4 @@ +variable "role_path" { + description = "The path applied to blueprint roles" + default = "/" +} diff --git a/incubator/roles/aws/ecs/Dockerfile b/incubator/roles/aws/ecs/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/ecs/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/ecs/README.md b/incubator/roles/aws/ecs/README.md new file mode 100644 index 00000000..5ac9d9fa --- /dev/null +++ b/incubator/roles/aws/ecs/README.md @@ -0,0 +1,20 @@ +# AWS ECS role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that has the following privileges: + +* Access for managing ECS clusters specific to this blueprint +* Access for managing ECS services specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/ecs/blueprint.tf b/incubator/roles/aws/ecs/blueprint.tf new file mode 100644 index 00000000..9fa1e860 --- /dev/null +++ b/incubator/roles/aws/ecs/blueprint.tf @@ -0,0 +1,22 @@ +resource "aws_iam_role" "blueprint" { + name = "ecs-blueprint-role" + description = "Role assumed by Bedrock blueprints" + path = var.role_path + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json + force_detach_policies = true +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "route53_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53ReadOnlyAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "cloudformation_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-passrole" + role = aws_iam_role.blueprint.name +} diff --git a/incubator/roles/aws/ecs/cloudformation.tf b/incubator/roles/aws/ecs/cloudformation.tf new file mode 100644 index 00000000..0688cbd1 --- /dev/null +++ b/incubator/roles/aws/ecs/cloudformation.tf @@ -0,0 +1,22 @@ +data "aws_iam_policy_document" "cloudformation_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] 
+ principals { + identifiers = ["cloudformation.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "ecs_cloudformation" { + name = "ecs-cloudformation-role" + description = "Role assumed by ECS Cloudformation stacks" + path = var.role_path + assume_role_policy = data.aws_iam_policy_document.cloudformation_assume_role_policy.json + force_detach_policies = true +} + +resource "aws_iam_role_policy_attachment" "ec2_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = aws_iam_role.ecs_cloudformation.name +} diff --git a/incubator/roles/aws/ecs/main.tf b/incubator/roles/aws/ecs/main.tf new file mode 100644 index 00000000..c2521c90 --- /dev/null +++ b/incubator/roles/aws/ecs/main.tf @@ -0,0 +1,53 @@ +/** + * # AWS ECS role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that has the following privileges: + * + * * Access for managing ECS clusters specific to this blueprint + * * Access for managing ECS services specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +data "aws_iam_policy" "cloudformation_create_policy" { + arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-create" +} + +resource "aws_iam_role" "serviceadmin" { + name = "ecs-service-admin" + path = var.role_path + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json + force_detach_policies = true +} + +resource "aws_iam_role" "codedeploy" { + name = "ecs-codedeploy-role" + path = var.role_path + assume_role_policy = 
data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_policy_attachment" "ecs_access" { + name = "bedrock-ecs-access" + policy_arn = "arn:aws:iam::aws:policy/AmazonECS_FullAccess" + roles = [aws_iam_role.blueprint.name, aws_iam_role.serviceadmin.name] +} + +resource "aws_iam_policy_attachment" "cloudformation_create" { + name = "bedrock-ecs-cloudformation" + policy_arn = data.aws_iam_policy.cloudformation_create_policy.arn + roles = [aws_iam_role.blueprint.id, aws_iam_role.serviceadmin.id] +} diff --git a/incubator/roles/aws/ecs/provider.tf b/incubator/roles/aws/ecs/provider.tf new file mode 100644 index 00000000..7c3fb138 --- /dev/null +++ b/incubator/roles/aws/ecs/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.19.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/ecs/vars.tf b/incubator/roles/aws/ecs/vars.tf new file mode 100644 index 00000000..fe55c844 --- /dev/null +++ b/incubator/roles/aws/ecs/vars.tf @@ -0,0 +1,4 @@ +variable "role_path" { + description = "The path applied to blueprint roles" + default = "/" +} diff --git a/incubator/roles/aws/iam/Dockerfile b/incubator/roles/aws/iam/Dockerfile new file mode 100644 index 00000000..9683bff8 --- /dev/null +++ b/incubator/roles/aws/iam/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.sh *.tf /bootstrap/ diff --git a/incubator/roles/aws/iam/README.md b/incubator/roles/aws/iam/README.md new file mode 100644 index 00000000..7057ca4b --- /dev/null +++ b/incubator/roles/aws/iam/README.md @@ -0,0 +1,18 @@ +# AWS IAM role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create a role that has the following privileges: + +* IAM access for creation of IAM roles specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/iam/lambda.tf b/incubator/roles/aws/iam/lambda.tf new file mode 100644 index 00000000..2b34ec82 --- /dev/null +++ b/incubator/roles/aws/iam/lambda.tf @@ -0,0 +1,35 @@ +data "aws_iam_policy_document" "lambda_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["lambda.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "lambda" { + name = "iam-lambda-role" + description = "Role assumed by IAM Lambda functions" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = aws_iam_role.lambda.name +} + +resource "aws_iam_role_policy_attachment" "iam_keyrotation" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-keyrotation" + role = aws_iam_role.lambda.name +} + +resource "aws_iam_role_policy_attachment" "lambda_config" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-lambda-config" + role = aws_iam_role.lambda.name +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_logs" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-logs" + role = aws_iam_role.lambda.name +} diff --git a/incubator/roles/aws/iam/main.tf b/incubator/roles/aws/iam/main.tf new file mode 100644 index 00000000..3e27bc68 --- 
/dev/null +++ b/incubator/roles/aws/iam/main.tf @@ -0,0 +1,40 @@ +/** + * # AWS IAM role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create a role that has the following privileges: + * + * * IAM access for creation of IAM roles specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "iam-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "iam_role_access" { + policy_arn = "arn:aws:iam::aws:policy/IAMFullAccess" + role = aws_iam_role.blueprint.name +} + +// lambda for automated user key rotation.. 
+resource "aws_iam_role_policy_attachment" "lambda_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AWSLambdaFullAccess" + role = aws_iam_role.blueprint.name +} diff --git a/incubator/roles/aws/iam/provider.tf b/incubator/roles/aws/iam/provider.tf new file mode 100644 index 00000000..1728467e --- /dev/null +++ b/incubator/roles/aws/iam/provider.tf @@ -0,0 +1,3 @@ +provider "aws" { + version = ">= 2.7.0" +} diff --git a/incubator/roles/aws/iam/vars.tf b/incubator/roles/aws/iam/vars.tf new file mode 100644 index 00000000..c71cdf27 --- /dev/null +++ b/incubator/roles/aws/iam/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_required" { + description = "Indicates whether users assuming this role must have MFA enabled" + default = "true" +} \ No newline at end of file diff --git a/incubator/roles/aws/lambda/Dockerfile b/incubator/roles/aws/lambda/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/lambda/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/lambda/README.md b/incubator/roles/aws/lambda/README.md new file mode 100644 index 00000000..517c8b76 --- /dev/null +++ b/incubator/roles/aws/lambda/README.md @@ -0,0 +1,17 @@ +# AWS Lambda role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that have the following privileges: + +* Access for managing Lambda functions specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/lambda/main.tf b/incubator/roles/aws/lambda/main.tf new file mode 100644 index 00000000..4f3acf23 --- /dev/null +++ b/incubator/roles/aws/lambda/main.tf @@ -0,0 +1,33 @@ +/** + * # AWS Lambda role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that have the following privileges: + * + * * Access for managing Lambda functions specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "lambda-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "lambda_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AWSLambdaFullAccess" + role = aws_iam_role.blueprint.id +} diff --git a/blueprints/aws/lambda/layer/provider.tf b/incubator/roles/aws/lambda/provider.tf similarity index 91% rename from blueprints/aws/lambda/layer/provider.tf rename to incubator/roles/aws/lambda/provider.tf index 28fb7f51..882b5fe8 100.644 --- a/blueprints/aws/lambda/layer/provider.tf +++ b/incubator/roles/aws/lambda/provider.tf @@ -1,6 +1,6 @@ provider "aws" { assume_role { - role_arn = 
"arn:aws:iam::${var.assume_role_account}:role/bedrock-lambda-admin" + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" } } diff --git a/incubator/roles/aws/rds/Dockerfile b/incubator/roles/aws/rds/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/rds/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/rds/README.md b/incubator/roles/aws/rds/README.md new file mode 100644 index 00000000..ed25141e --- /dev/null +++ b/incubator/roles/aws/rds/README.md @@ -0,0 +1,19 @@ +# AWS RDS role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create a role that has the following privileges: + +* Access for managing RDS clusters and instances specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/rds/lambda.tf b/incubator/roles/aws/rds/lambda.tf new file mode 100644 index 00000000..011e649e --- /dev/null +++ b/incubator/roles/aws/rds/lambda.tf @@ -0,0 +1,20 @@ +data "aws_iam_policy_document" "lambda_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["lambda.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "lambda" { + name = "rds-lambda-role" + description = "Role assumed by RDS Lambda functions" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" 
"rds_admin" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-rds-admin" + role = aws_iam_role.lambda.name +} diff --git a/incubator/roles/aws/rds/main.tf b/incubator/roles/aws/rds/main.tf new file mode 100644 index 00000000..4224feab --- /dev/null +++ b/incubator/roles/aws/rds/main.tf @@ -0,0 +1,34 @@ +/** + * # AWS RDS role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create a role that has the following privileges: + * + * * Access for managing RDS clusters and instances specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "rds-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "rds_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRDSFullAccess" + role = aws_iam_role.blueprint.name +} diff --git a/incubator/roles/aws/rds/provider.tf b/incubator/roles/aws/rds/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/rds/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/rds/vars.tf b/incubator/roles/aws/rds/vars.tf new file mode 100644 index 00000000..c71cdf27 --- /dev/null +++ 
b/incubator/roles/aws/rds/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_required" { + description = "Indicates whether users assuming this role must have MFA enabled" + default = "true" +} \ No newline at end of file diff --git a/incubator/roles/aws/route53/Dockerfile b/incubator/roles/aws/route53/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/route53/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/route53/README.md b/incubator/roles/aws/route53/README.md new file mode 100644 index 00000000..97db358a --- /dev/null +++ b/incubator/roles/aws/route53/README.md @@ -0,0 +1,19 @@ +# AWS Route53 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create a role that has the following privileges: + +* Route53 access for creation of hosted zones specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/route53/blueprint.tf b/incubator/roles/aws/route53/blueprint.tf new file mode 100644 index 00000000..adc0f8b9 --- /dev/null +++ b/incubator/roles/aws/route53/blueprint.tf @@ -0,0 +1,27 @@ +resource "aws_iam_role" "blueprint" { + name = "route53-blueprint-role" + description = "Role assumed by Bedrock blueprints" + path = var.role_path + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json + force_detach_policies = true +} + +resource "aws_iam_role_policy_attachment" "route53_access" { + policy_arn = 
"arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "vpc_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonVPCReadOnlyAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "route53_additional" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-route53-zoneaccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "s3_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/route53/main.tf b/incubator/roles/aws/route53/main.tf new file mode 100644 index 00000000..9b586273 --- /dev/null +++ b/incubator/roles/aws/route53/main.tf @@ -0,0 +1,23 @@ +/** + * # AWS Route53 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create a role that has the following privileges: + * + * * Route53 access for creation of hosted zones specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} diff --git a/incubator/roles/aws/route53/provider.tf b/incubator/roles/aws/route53/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/route53/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/route53/vars.tf b/incubator/roles/aws/route53/vars.tf new file mode 100644 index 00000000..fe55c844 --- /dev/null +++ b/incubator/roles/aws/route53/vars.tf @@ -0,0 +1,4 @@ +variable "role_path" { + description = "The path applied to blueprint roles" + default = "/" +} diff --git a/incubator/roles/aws/s3/Dockerfile b/incubator/roles/aws/s3/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/s3/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/s3/README.md b/incubator/roles/aws/s3/README.md new file mode 100644 index 00000000..ab8af317 --- /dev/null +++ b/incubator/roles/aws/s3/README.md @@ -0,0 +1,19 @@ +# AWS S3 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create a role that has the following privileges: + +* Access for managing S3 buckets specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/s3/lambda.tf b/incubator/roles/aws/s3/lambda.tf new file mode 100644 index 00000000..01512b60 --- /dev/null +++ b/incubator/roles/aws/s3/lambda.tf @@ -0,0 +1,20 @@ +data "aws_iam_policy_document" "lambda_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["lambda.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "lambda" { + name = "s3-lambda-role" + description = "Role assumed by S3 Lambda functions" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "lambda_cloudwatch_logs" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-logs" + role = aws_iam_role.lambda.id +} diff --git a/incubator/roles/aws/s3/main.tf b/incubator/roles/aws/s3/main.tf new file mode 100644 index 00000000..eceb2d01 --- /dev/null +++ b/incubator/roles/aws/s3/main.tf @@ -0,0 +1,49 @@ +/** + * # AWS S3 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create a role that has the following privileges: + * + * * Access for managing S3 buckets specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "s3-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "s3_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess" + role = aws_iam_role.blueprint.id +} + +resource "aws_iam_role_policy_attachment" "route53_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "kms_poweruser" { + policy_arn = "arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "kms_keymanagement" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-kms-keymanagement" + role = aws_iam_role.blueprint.name +} diff --git a/incubator/roles/aws/s3/provider.tf b/incubator/roles/aws/s3/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/s3/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/s3/vars.tf b/incubator/roles/aws/s3/vars.tf new file mode 100644 index 00000000..c71cdf27 --- /dev/null +++ 
b/incubator/roles/aws/s3/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_required" { + description = "Indicates whether users assuming this role must have MFA enabled" + default = "true" +} \ No newline at end of file diff --git a/incubator/roles/aws/ses/Dockerfile b/incubator/roles/aws/ses/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/ses/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/ses/README.md b/incubator/roles/aws/ses/README.md new file mode 100644 index 00000000..cb016b3d --- /dev/null +++ b/incubator/roles/aws/ses/README.md @@ -0,0 +1,19 @@ +# AWS Route53 role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create a role that has the following privileges: + +* SES access for management of emails +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/ses/main.tf b/incubator/roles/aws/ses/main.tf new file mode 100644 index 00000000..bd19cfa7 --- /dev/null +++ b/incubator/roles/aws/ses/main.tf @@ -0,0 +1,34 @@ +/** + * # AWS Route53 role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create a role that has the following privileges: + * + * * SES access for management of emails + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "ses-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "s3_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess" + role = aws_iam_role.blueprint.id +} diff --git a/incubator/roles/aws/ses/provider.tf b/incubator/roles/aws/ses/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/ses/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/ses/vars.tf b/incubator/roles/aws/ses/vars.tf new file mode 100644 index 00000000..c71cdf27 --- /dev/null +++ b/incubator/roles/aws/ses/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_required" { + description = "Indicates whether users assuming this role must have MFA enabled" + default = "true" +} \ No newline at end of file diff --git a/incubator/roles/aws/spotfleet/README.md b/incubator/roles/aws/spotfleet/README.md new file mode 100644 index 00000000..a216315d --- /dev/null +++ b/incubator/roles/aws/spotfleet/README.md @@ -0,0 +1,19 @@ +# AWS SpotFleet role configuration + +Purpose: Provision IAM roles in AWS. 
+ +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create roles that has the following privileges: + +* Access for managing Spot Fleet clusters specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/vpc/Dockerfile b/incubator/roles/aws/vpc/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/aws/vpc/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/aws/vpc/README.md b/incubator/roles/aws/vpc/README.md new file mode 100644 index 00000000..05236015 --- /dev/null +++ b/incubator/roles/aws/vpc/README.md @@ -0,0 +1,19 @@ +# AWS VPC role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create a role that has the following privileges: + +* Access for managing VPCs specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `true` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/aws/vpc/cloudformation.tf b/incubator/roles/aws/vpc/cloudformation.tf new file mode 100644 index 00000000..1f7ae4cd --- /dev/null +++ b/incubator/roles/aws/vpc/cloudformation.tf @@ -0,0 +1,25 @@ +data "aws_iam_policy_document" "cloudformation_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["cloudformation.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "cloudformation" { + name = "vpc-cloudformation-role" + description = "Role assumed by VPC Cloudformation stacks" + assume_role_policy = data.aws_iam_policy_document.cloudformation_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "vpc_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonVPCFullAccess" + role = aws_iam_role.cloudformation.name +} + +resource "aws_iam_role_policy_attachment" "cloudformation_iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-passrole" + role = aws_iam_role.cloudformation.name +} diff --git a/incubator/roles/aws/vpc/main.tf b/incubator/roles/aws/vpc/main.tf new file mode 100644 index 00000000..30db73c2 --- /dev/null +++ b/incubator/roles/aws/vpc/main.tf @@ -0,0 +1,59 @@ +/** + * # AWS VPC role configuration + * + * Purpose: Provision IAM roles in AWS. 
+ * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create a role that has the following privileges: + * + * * Access for managing VPCs specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "blueprint" { + name = "vpc-blueprint-role" + description = "Role assumed by Bedrock blueprints" + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "ec2_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "cloudformation_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-passrole" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "cloudformation_create" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-create" + role = aws_iam_role.blueprint.name +} + +resource "aws_iam_role_policy_attachment" "ec2_subnet_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ec2-subnet-fullaccess" + role = aws_iam_role.blueprint.id +} diff --git 
a/incubator/roles/aws/vpc/provider.tf b/incubator/roles/aws/vpc/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/aws/vpc/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/aws/vpc/vars.tf b/incubator/roles/aws/vpc/vars.tf new file mode 100644 index 00000000..c71cdf27 --- /dev/null +++ b/incubator/roles/aws/vpc/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_required" { + description = "Indicates whether users assuming this role must have MFA enabled" + default = "true" +} \ No newline at end of file diff --git a/incubator/roles/awstats/aws/Dockerfile b/incubator/roles/awstats/aws/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/awstats/aws/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/awstats/aws/README.md b/incubator/roles/awstats/aws/README.md new file mode 100644 index 00000000..897d1f2d --- /dev/null +++ b/incubator/roles/awstats/aws/README.md @@ -0,0 +1,17 @@ +# AWS AWstats role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that has the following privileges: + +* Access for managing AWstats hosts specific to this blueprint + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/awstats/aws/cloudformation.tf b/incubator/roles/awstats/aws/cloudformation.tf new file mode 100644 index 00000000..7295de82 --- /dev/null +++ b/incubator/roles/awstats/aws/cloudformation.tf @@ -0,0 +1,40 @@ +data "aws_iam_policy_document" "cloudformation_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["cloudformation.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "awstats_cloudformation" { + name = "bedrock-awstats-cloudformation" + assume_role_policy = "${data.aws_iam_policy_document.cloudformation_assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = "${aws_iam_role.awstats_cloudformation.name}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.awstats_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "ec2_instance_profile_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ec2-instance-profile-fullaccess" + role = "${aws_iam_role.awstats_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_log_groups" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-log-groups" + role = "${aws_iam_role.awstats_cloudformation.id}" +} + +resource 
"aws_iam_role_policy_attachment" "route53_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = "${aws_iam_role.awstats_cloudformation.name}" +} diff --git a/incubator/roles/awstats/aws/main.tf b/incubator/roles/awstats/aws/main.tf new file mode 100644 index 00000000..1352d535 --- /dev/null +++ b/incubator/roles/awstats/aws/main.tf @@ -0,0 +1,53 @@ +/** + * # AWS AWstats role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create roles that has the following privileges: + * + * * Access for managing AWstats hosts specific to this blueprint + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "awstats_admin" { + name = "bedrock-awstats-admin" + assume_role_policy = "${data.aws_iam_policy_document.assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" + role = "${aws_iam_role.awstats_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = "${aws_iam_role.awstats_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess" + role = "${aws_iam_role.awstats_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.awstats_admin.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_create" { + policy_arn = 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-create" + role = "${aws_iam_role.awstats_admin.id}" +} diff --git a/incubator/roles/awstats/aws/provider.tf b/incubator/roles/awstats/aws/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/awstats/aws/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/bastion/aws/Dockerfile b/incubator/roles/bastion/aws/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/bastion/aws/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/bastion/aws/README.md b/incubator/roles/bastion/aws/README.md new file mode 100644 index 00000000..e094fd77 --- /dev/null +++ b/incubator/roles/bastion/aws/README.md @@ -0,0 +1,18 @@ +# AWS Bastion role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ +This script will create roles that has the following privileges: + +* Access for managing Bastion hosts specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/bastion/aws/instance_profile.tf b/incubator/roles/bastion/aws/instance_profile.tf new file mode 100644 index 00000000..6dbcdc7b --- /dev/null +++ b/incubator/roles/bastion/aws/instance_profile.tf @@ -0,0 +1,25 @@ +data "aws_iam_policy_document" "ec2_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = ["ec2.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "bastion_instance" { + name = "bedrock-bastion-instance" + assume_role_policy = "${data.aws_iam_policy_document.ec2_assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_ssm" { + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM" + role = "${aws_iam_role.bastion_instance.name}" +} + +resource "aws_iam_role_policy_attachment" "ec2_cloudwatch" { + policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy" + role = "${aws_iam_role.bastion_instance.name}" +} diff --git a/incubator/roles/bastion/aws/main.tf b/incubator/roles/bastion/aws/main.tf new file mode 100644 index 00000000..30faa4fc --- /dev/null +++ b/incubator/roles/bastion/aws/main.tf @@ -0,0 +1,52 @@ +/** + * # AWS Bastion role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create roles that has the following privileges: + * + * * Access for managing Bastion hosts specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + identifiers = [ + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root", + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/bedrock-iam-admin" + ] + type = "AWS" + } + } +} + +resource "aws_iam_role" "bastion_admin" { + name = "bedrock-bastion-admin" + assume_role_policy = "${data.aws_iam_policy_document.assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = "${aws_iam_role.bastion_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.bastion_admin.id}" +} + +resource "aws_iam_role_policy_attachment" "ec2_instance_profile_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ec2-instance-profile-fullaccess" + role = "${aws_iam_role.bastion_admin.id}" +} + +resource "aws_iam_role_policy_attachment" "route53_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = "${aws_iam_role.bastion_admin.name}" +} diff --git a/incubator/roles/bastion/aws/provider.tf b/incubator/roles/bastion/aws/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/bastion/aws/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable "assume_role_account" { + description = "AWS account ID for the 
role to assume into" +} diff --git a/incubator/roles/blueprint/Dockerfile b/incubator/roles/blueprint/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/blueprint/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/blueprint/main.tf b/incubator/roles/blueprint/main.tf new file mode 100644 index 00000000..23f56a62 --- /dev/null +++ b/incubator/roles/blueprint/main.tf @@ -0,0 +1,37 @@ +/** + * # AWS IAM role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + * + * This script will create a role that has the following privileges: + * + * * IAM access to assume other IAM roles specific a blueprint + * * Access to read/write Terraform state associated with the account + * * Access to manage Terraform state locks associated with the blueprint + */ +data "aws_caller_identity" "current" {} + +module "iam_policies" { + source = "micronode/iam-policies/aws" + + region = var.region + name_prefix = "bedrock" + assume_role_filter = "*-blueprint-role" +} + +module "blueprintadmin" { + source = "micronode/iam-role/aws" + + name = "bedrock-blueprint-admin" + description = "Bedrock role used to provision blueprints and blueprint roles" + path = var.role_path + principal = "account" + policies = [ + module.iam_policies.iam_assumerole_arn, + module.iam_policies.s3_terraform_access_arn, + module.iam_policies.dynamodb_terraform_access_arn, + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser", + ] +} diff --git a/incubator/roles/blueprint/vars.tf b/incubator/roles/blueprint/vars.tf new file mode 100644 index 00000000..aaa68d1c --- /dev/null +++ b/incubator/roles/blueprint/vars.tf @@ -0,0 +1,8 @@ +variable "region" { + description = "Selected AWS region" +} + +variable "role_path" { + description = "The path applied to blueprint roles" + default = "/" +} diff --git 
a/incubator/roles/nginx/aws/Dockerfile b/incubator/roles/nginx/aws/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/incubator/roles/nginx/aws/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/incubator/roles/nginx/aws/README.md b/incubator/roles/nginx/aws/README.md new file mode 100644 index 00000000..c5e673a8 --- /dev/null +++ b/incubator/roles/nginx/aws/README.md @@ -0,0 +1,19 @@ +# AWS NGINX role configuration + +Purpose: Provision IAM roles in AWS. + +Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. + +This script will create roles that has the following privileges: + +* Access for managing NGINX hosts specific to this blueprint +* Access to read/write Terraform state associated with the account + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| assume\_role\_account | AWS account ID for the role to assume into | string | - | yes | +| mfa\_required | Indicates whether users assuming this role must have MFA enabled | string | `false` | no | +| region | AWS default region | string | - | yes | + diff --git a/incubator/roles/nginx/aws/cloudformation.tf b/incubator/roles/nginx/aws/cloudformation.tf new file mode 100644 index 00000000..5f26b913 --- /dev/null +++ b/incubator/roles/nginx/aws/cloudformation.tf @@ -0,0 +1,39 @@ +data "aws_iam_policy_document" "cloudformation_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["cloudformation.amazonaws.com"] + type = "Service" + } + } +} + +resource "aws_iam_role" "nginx_cloudformation" { + name = "bedrock-nginx-cloudformation" + assume_role_policy = "${data.aws_iam_policy_document.cloudformation_assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_full_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2FullAccess" + role = "${aws_iam_role.nginx_cloudformation.name}" 
+} + +resource "aws_iam_role_policy_attachment" "cloudformation_iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.nginx_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "ec2_instance_profile_fullaccess" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-ec2-instance-profile-fullaccess" + role = "${aws_iam_role.nginx_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_log_groups" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudwatch-log-groups" + role = "${aws_iam_role.nginx_cloudformation.id}" +} + +resource "aws_iam_role_policy_attachment" "route53_access" { + policy_arn = "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" + role = "${aws_iam_role.nginx_cloudformation.name}" +} diff --git a/incubator/roles/nginx/aws/main.tf b/incubator/roles/nginx/aws/main.tf new file mode 100644 index 00000000..ff6ce95f --- /dev/null +++ b/incubator/roles/nginx/aws/main.tf @@ -0,0 +1,53 @@ +/** + * # AWS NGINX role configuration + * + * Purpose: Provision IAM roles in AWS. + * + * Rationale: Bedrock blueprints use IAM roles to restrict the privileges of the provisioner. 
+ * + * This script will create roles that has the following privileges: + * + * * Access for managing NGINX hosts specific to this blueprint + * * Access to read/write Terraform state associated with the account + */ +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + principals { + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + type = "AWS" + } + } +} + +resource "aws_iam_role" "nginx_admin" { + name = "bedrock-nginx-admin" + assume_role_policy = "${data.aws_iam_policy_document.assume_role_policy.json}" +} + +resource "aws_iam_role_policy_attachment" "ec2_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" + role = "${aws_iam_role.nginx_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_readonly" { + policy_arn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + role = "${aws_iam_role.nginx_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_readonly" { + policy_arn = "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess" + role = "${aws_iam_role.nginx_admin.name}" +} + +resource "aws_iam_role_policy_attachment" "iam_passrole" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-iam-passrole" + role = "${aws_iam_role.nginx_admin.id}" +} + +resource "aws_iam_role_policy_attachment" "cloudformation_create" { + policy_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/bedrock-cloudformation-create" + role = "${aws_iam_role.nginx_admin.id}" +} diff --git a/incubator/roles/nginx/aws/provider.tf b/incubator/roles/nginx/aws/provider.tf new file mode 100644 index 00000000..98827b91 --- /dev/null +++ b/incubator/roles/nginx/aws/provider.tf @@ -0,0 +1,10 @@ +provider "aws" { + version = ">= 2.7.0" + assume_role { + role_arn = "arn:aws:iam::${var.assume_role_account}:role/bedrock-iam-admin" + } +} + +variable 
"assume_role_account" { + description = "AWS account ID for the role to assume into" +} diff --git a/incubator/roles/nginx/aws/vars.tf b/incubator/roles/nginx/aws/vars.tf new file mode 100644 index 00000000..479d94fe --- /dev/null +++ b/incubator/roles/nginx/aws/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_required" { + description = "Indicates whether users assuming this role must have MFA enabled" + default = "false" +} \ No newline at end of file diff --git a/rancher/README.md b/rancher/README.md new file mode 100644 index 00000000..3f7508a1 --- /dev/null +++ b/rancher/README.md @@ -0,0 +1,4 @@ +# Rancher Blueprints + +This directory contains blueprints associated with maintaining +Rancher services. diff --git a/rancher/env/Dockerfile b/rancher/env/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/rancher/env/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/rancher/env/README.md b/rancher/env/README.md new file mode 100644 index 00000000..807ea9ad --- /dev/null +++ b/rancher/env/README.md @@ -0,0 +1,16 @@ +# Rancher environment configuration + +Provision an environment on a Rancher server instance. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| enabled | Start/stop the rancher environment | string | - | yes | +| environment | Environment identifier for the rancher hosts | string | - | yes | +| host\_count | The number of hosts to create | string | `1` | no | +| rancher\_access\_key | Rancher API access key | string | - | yes | +| rancher\_secret\_key | Rancher API secret key | string | - | yes | +| rancher\_url | Base URL of Rancher API | string | `http://rancher.mnode.org` | no | +| region | Digitalocean region of the rancher environment | string | - | yes | + diff --git a/rancher/env/main.tf b/rancher/env/main.tf new file mode 100644 index 00000000..bd4bfeff --- /dev/null +++ b/rancher/env/main.tf @@ -0,0 +1,28 @@ +/** + * # Rancher environment configuration + * + * Provision an environment on a Rancher server instance. + */ +resource "rancher_environment" "environment" { + name = var.environment + orchestration = "cattle" +} + +resource "rancher_host" "host" { + count = var.host_count + environment_id = rancher_environment.environment.id + hostname = "rancheragent${count.index}-${var.environment}" + name = "rancheragent${count.index}-${var.environment}" +} + +resource "digitalocean_floating_ip" "rancheragent" { + droplet_id = rancher_host.host.id + region = var.do_region +} + +resource "rancher_stack" "cowcheck" { + environment_id = rancher_environment.environment.id + name = "cowcheck" + catalog_id = "community:cowcheck:0" + start_on_create = true +} \ No newline at end of file diff --git a/rancher/env/vars.tf b/rancher/env/vars.tf new file mode 100644 index 00000000..b108e861 --- /dev/null +++ b/rancher/env/vars.tf @@ -0,0 +1,21 @@ +variable "enabled" { + description = "Start/stop the rancher environment" + default = "true" +} + +variable "environment" { + description = "Environment identifier for the rancher hosts" +} + +variable "host_count" { + description = "The number of hosts to 
create" + default = "1" +} + +variable "do_region" { + description = "Digitalocean region of the rancher environment" +} + +locals { + uuid = "${var.environment}-rancherstack" +} diff --git a/blueprints/rancher/stack/provider.tf b/rancher/provider.tf similarity index 63% rename from blueprints/rancher/stack/provider.tf rename to rancher/provider.tf index 5ae614f4..ffd28937 100644 --- a/blueprints/rancher/stack/provider.tf +++ b/rancher/provider.tf @@ -1,7 +1,7 @@ provider "rancher" { - api_url = "${var.rancher_url}" - access_key = "${var.rancher_access_key}" - secret_key = "${var.rancher_secret_key}" + api_url = var.rancher_url + access_key = var.rancher_access_key + secret_key = var.rancher_secret_key } variable "rancher_url" { @@ -16,7 +16,3 @@ variable "rancher_access_key" { variable "rancher_secret_key" { description = "Rancher API secret key" } - -provider "null" { - version = ">= 2.1.0" -} diff --git a/rancher/stack/Dockerfile b/rancher/stack/Dockerfile new file mode 100644 index 00000000..4b9519aa --- /dev/null +++ b/rancher/stack/Dockerfile @@ -0,0 +1,3 @@ +FROM bedrock/blueprint-base + +ADD *.tf /bootstrap/ diff --git a/rancher/stack/README.md b/rancher/stack/README.md new file mode 100644 index 00000000..2b43f68f --- /dev/null +++ b/rancher/stack/README.md @@ -0,0 +1,25 @@ +# Rancher stack configuration + +Provision a stack on a Rancher server instance. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| bastion\_host | Bastion host used to access reverse proxy | string | - | yes | +| catalog\_id | ID of predefined stack in Rancher catalog | string | `` | no | +| docker\_compose | Location of docker-compose file | string | `docker-compose.yml` | no | +| enabled | Start/stop the rancher stack | string | - | yes | +| environment | Environment identifier for the rancher hosts | string | - | yes | +| hostname | Hostname to configure in virtual host | string | - | yes | +| rancher\_access\_key | Rancher API access key | string | - | yes | +| rancher\_compose | Location of rancher-compose file | string | `rancher-compose.yml` | no | +| rancher\_secret\_key | Rancher API secret key | string | - | yes | +| rancher\_url | Base URL of Rancher API | string | `http://rancher.mnode.org` | no | +| reverseproxy\_host | Host to install vhost configuration | string | - | yes | +| ssh\_private\_key | Location of private key file for SSH access to droplets | string | `~/.ssh/id_rsa` | no | +| ssl\_enabled | Enable SSL with Let's Encrypt | string | - | yes | +| stack\_name | Name of the Rancher stack | string | - | yes | +| target\_hosts | List of target hosts for vhost configuration | list | - | yes | +| target\_port | Target port for vhost configuration | string | `8080` | no | + diff --git a/rancher/stack/main.tf b/rancher/stack/main.tf new file mode 100644 index 00000000..8362d1d1 --- /dev/null +++ b/rancher/stack/main.tf @@ -0,0 +1,25 @@ +/** + * # Rancher stack configuration + * + * Provision a stack on a Rancher server instance. 
+ */ +data "rancher_environment" "environment" { + name = var.environment +} + +resource "rancher_stack" "catalog_stack" { + count = replace(replace(length(var.catalog_id) > 0, "/true/", 1), "/false/", 0) + environment_id = data.rancher_environment.environment.id + name = var.stack_name + catalog_id = var.catalog_id + start_on_create = true +} + +resource "rancher_stack" "stack" { + count = replace(replace(length(var.catalog_id) > 0, "/false/", 1), "/true/", 0) + environment_id = data.rancher_environment.environment.id + name = var.stack_name + docker_compose = file(var.docker_compose) + rancher_compose = file(var.rancher_compose) + start_on_create = true +} diff --git a/rancher/stack/vars.tf b/rancher/stack/vars.tf new file mode 100644 index 00000000..4515f09f --- /dev/null +++ b/rancher/stack/vars.tf @@ -0,0 +1,66 @@ +//variable "ssh_key" { +// description = "Identifier of public key file for SSH access to droplets" +//} + +variable "ssh_private_key" { + description = "Location of private key file for SSH access to droplets" + default = "~/.ssh/id_rsa" +} + +variable "enabled" { + description = "Start/stop the rancher stack" + default = "true" +} + +variable "environment" { + description = "Environment identifier for the rancher hosts" +} + +variable "stack_name" { + description = "Name of the Rancher stack" +} + +variable "hostname" { + description = "Hostname to configure in virtual host" +} + +variable "nginx_host" { + description = "Host to install vhost configuration" +} + +variable "bastion_fqdn" { + description = "Bastion host used to access reverse proxy" +} + +variable "target_hosts" { + type = "list" + description = "List of target hosts for vhost configuration" +} + +variable "target_port" { + description = "Target port for vhost configuration" + default = "8080" +} + +variable "catalog_id" { + description = "ID of predefined stack in Rancher catalog" + default = "" +} + +variable "docker_compose" { + description = "Location of docker-compose file" + 
default = "docker-compose.yml" +} + +variable "rancher_compose" { + description = "Location of rancher-compose file" + default = "rancher-compose.yml" +} + +variable "ssl_enabled" { + description = "Enable SSL with Let's Encrypt" +} + +locals { + uuid = "${var.environment}-${substr(sha256(var.hostname), -10, -1)}" +} diff --git a/rancher/stack/vhost.tf b/rancher/stack/vhost.tf new file mode 100644 index 00000000..d9678ad4 --- /dev/null +++ b/rancher/stack/vhost.tf @@ -0,0 +1,59 @@ +resource "null_resource" "vhost" { + + count = replace(replace(var.ssl_enabled, "/false/", 1), "/true/", 0) + + triggers { + host = var.nginx_host + targets = join(",", var.target_hosts) + port = var.target_port + ssl_enabled = var.ssl_enabled + } + + provisioner "file" { + content = <./README.md + +format: + $(TERRAFORM) fmt -list=true ./ && \ + $(TERRAFORM) fmt -list=true ./organization && \ + $(TERRAFORM) fmt -list=true ./s3-remote-state + +list: + $(TERRAFORM) workspace list + +new: + $(TERRAFORM) workspace new $(WORKSPACE) + +select: + $(TERRAFORM) workspace select $(WORKSPACE) + +workspace: + $(TERRAFORM) workspace show + +plan: + $(TERRAFORM) plan -input=false -var-file=$(WORKSPACE).tfvars + +apply: + $(TERRAFORM) apply -input=false -var-file=$(WORKSPACE).tfvars $(TF_ARGS) \ No newline at end of file diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 00000000..8ff01eee --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,24 @@ +![Terraform](terraform.png) + +## Requirements + +No requirements. + +## Providers + +No provider. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| admin | Email address of the organization admin | `any` | `null` | no | +| backend\_type | Type of Terraform backend to provision | `string` | `"s3"` | no | +| membership | A list of organization team member email addresses | `list(string)` | `[]` | no | +| mfa\_delete | Enable MFA delete for versioned objects | `bool` | `false` | no | +| name | Name of the organization | `any` | `null` | no | + +## Outputs + +No output. + diff --git a/terraform/backend/main.tf b/terraform/backend/main.tf new file mode 100644 index 00000000..fb745357 --- /dev/null +++ b/terraform/backend/main.tf @@ -0,0 +1,23 @@ +data "aws_caller_identity" "current" {} + +module "tf_state" { + source = "figurate/s3-bucket/aws" + + bucket = "${data.aws_caller_identity.current.account_id}-terraform-state" + versioned = true + encrypted = true + mfa_delete = var.mfa_delete +} + +resource "aws_dynamodb_table" "tf_lock" { + hash_key = "LockID" + name = "terraform-lock" + attribute { + name = "LockID" + type = "S" + } + point_in_time_recovery { + enabled = true + } + billing_mode = "PAY_PER_REQUEST" +} diff --git a/terraform/backend/vars.tf b/terraform/backend/vars.tf new file mode 100644 index 00000000..dbf7a645 --- /dev/null +++ b/terraform/backend/vars.tf @@ -0,0 +1,4 @@ +variable "mfa_delete" { + description = "Enable MFA delete for versioned objects" + default = true +} diff --git a/terraform/diagram.py b/terraform/diagram.py new file mode 100644 index 00000000..b004a7a6 --- /dev/null +++ b/terraform/diagram.py @@ -0,0 +1,7 @@ +from diagrams import Diagram +from diagrams.onprem.iac import Terraform +from diagrams.aws.storage import S3 + +with Diagram("Terraform", show=False, direction="TB"): + Terraform("organization") + S3("terraform_state") diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 00000000..000558e5 --- /dev/null +++ b/terraform/main.tf @@ 
-0,0 +1,18 @@ +/* + * ![Terraform](terraform.png) + */ +module "s3_remote_state" { + source = "./backend" + count = var.backend_type == "s3" ? 1 : 0 + + mfa_delete = var.mfa_delete +} + +module "tfe_organization" { + source = "./organizations" + count = var.backend_type == "tfe" ? 1 : 0 + + name = var.name + admin = var.admin + membership = var.membership +} diff --git a/terraform/organizations/.dockerignore b/terraform/organizations/.dockerignore new file mode 100644 index 00000000..3fa8c86b --- /dev/null +++ b/terraform/organizations/.dockerignore @@ -0,0 +1 @@ +.terraform diff --git a/terraform/organizations/.terraform.lock.hcl b/terraform/organizations/.terraform.lock.hcl new file mode 100644 index 00000000..276c427e --- /dev/null +++ b/terraform/organizations/.terraform.lock.hcl @@ -0,0 +1,20 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/tfe" { + version = "0.25.3" + hashes = [ + "h1:yLztDT2pPcRKnpryKb29xKujRmc82p5SuMt1fCFFT40=", + "zh:0979c23a42fb096ead4899ecd8117ef31dffaff68868eb59a4c00ff7aaa7ca52", + "zh:43b8f61b152cfa4e7568cff49c2252a4d67d35a8b3e3ce42fc87b0a2f86e80f1", + "zh:5326953390b5fa681a3f1989165f74782a06df2c27b2f833a592300ca5f7c84c", + "zh:79757c9ebbecba1ad6c76b49e382a36dcc56b94de04e6579b698bd574d5d42be", + "zh:92a91c40df51110a08597994c4af9c27d04ea86d36495056237c346324e1b993", + "zh:92def252c4e17c700d472b3bbbcf2396a92aa256d52382fddf24c5d9bbd56eea", + "zh:9f99e2f739439d395fc1a466448f799318cb7444268071cc57f43dbf0284d321", + "zh:a4421f846b0ffad38dbbc526be8f2bdb4ef67cbd1e4006f18232576865bb4510", + "zh:adefbca4d3041aa0d40aa347d6b6ebaa608ed797e132869cb2c0e43f83082e1f", + "zh:eb07847e98ee6fd7ad116a6c8f5563267393fd96b1e1e3d71c0f658114d1f2c7", + "zh:fe03bd4d2d34cd6d4a98d18910f95adb29e8e8ca844cab5614ba392a24f2ff6f", + ] +} diff --git a/terraform/organizations/Makefile b/terraform/organizations/Makefile new file mode 100644 index 00000000..0e184ad1 
--- /dev/null +++ b/terraform/organizations/Makefile @@ -0,0 +1,66 @@ +SHELL:=/bin/bash +AWS_DEFAULT_REGION?=ap-southeast-2 +AWS_ACCOUNT=`aws sts get-caller-identity | jq -r '.Account'` +TF_WORKSPACE=$(AWS_ACCOUNT) + +ifneq (, $(shell which docker)) +TERRAFORM_VERSION=1.0.0 +TERRAFORM=docker run --rm -v "${PWD}:/work" -v "${HOME}/.ssh:/root/.ssh" -v "${HOME}/.aws:/root/.aws" -v "${HOME}/.terraformrc:/root/.terraformrc" -e AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) -e http_proxy=$(http_proxy) --net=host -w /work hashicorp/terraform:$(TERRAFORM_VERSION) +else +TERRAFORM=terraform +endif + +TERRAFORM_DOCS=docker run --rm -v "${PWD}:/work" tmknom/terraform-docs + +CHECKOV=docker run --rm -v "${PWD}:/work" bridgecrew/checkov + +TFSEC=docker run --rm -v "${PWD}:/work" liamg/tfsec + +DIAGRAMS=docker run -v "${PWD}:/work" figurate/diagrams python + +.PHONY: all clean init validate test docs format list new select plan apply + +WORKSPACE=$(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + +all: test docs format + +clean: + rm -rf .terraform/ + +init: + $(TERRAFORM) init -upgrade + +validate: init + $(TERRAFORM) validate + +test: validate + $(CHECKOV) -d /work + + $(TFSEC) /work + +diagram: + $(DIAGRAMS) diagram.py + +docs: diagram + $(TERRAFORM_DOCS) markdown ./ >./README.md + +format: + $(TERRAFORM) fmt -list=true ./ + +list: + $(TERRAFORM) workspace list + +new: + $(TERRAFORM) workspace new $(WORKSPACE) + +select: + $(TERRAFORM) workspace select $(WORKSPACE) + +workspace: + $(TERRAFORM) workspace show + +plan: + $(TERRAFORM) plan -input=false -var-file=$(WORKSPACE).tfvars + +apply: + $(TERRAFORM) apply -input=false -var-file=$(WORKSPACE).tfvars $(TF_ARGS) \ No newline at end of file diff --git a/terraform/organizations/README.md b/terraform/organizations/README.md new file mode 100644 index 00000000..769eda14 --- /dev/null +++ b/terraform/organizations/README.md @@ -0,0 +1,23 @@ +# Terraform Organizations +Provision a list of Terraform organizations 
+![Terraform](terraform.png) + +## Requirements + +No requirements. + +## Providers + +No provider. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| membership | A list of organization team members | `list(string)` | `[]` | no | +| organizations | A list of organization names and administrators | `list(tuple([string, string]))` | `[]` | no | + +## Outputs + +No output. + diff --git a/terraform/organizations/diagram.py b/terraform/organizations/diagram.py new file mode 100644 index 00000000..b004a7a6 --- /dev/null +++ b/terraform/organizations/diagram.py @@ -0,0 +1,7 @@ +from diagrams import Diagram +from diagrams.onprem.iac import Terraform +from diagrams.aws.storage import S3 + +with Diagram("Terraform", show=False, direction="TB"): + Terraform("organization") + S3("terraform_state") diff --git a/terraform/organizations/main.tf b/terraform/organizations/main.tf new file mode 100644 index 00000000..eba91b80 --- /dev/null +++ b/terraform/organizations/main.tf @@ -0,0 +1,12 @@ +/* + * # Terraform Organizations + * Provision a list of Terraform organizations + * ![Terraform](terraform.png) + */ +module "organization" { + source = "figurate/organization/tfe" + count = length(var.organizations) + name = var.organizations[count.index][0] + admin = var.organizations[count.index][1] + collaborator_auth_policy = "two_factor_mandatory" +} diff --git a/terraform/organizations/terraform.png b/terraform/organizations/terraform.png new file mode 100644 index 00000000..f2031e0e Binary files /dev/null and b/terraform/organizations/terraform.png differ diff --git a/terraform/organizations/vars.tf b/terraform/organizations/vars.tf new file mode 100644 index 00000000..a361a3b8 --- /dev/null +++ b/terraform/organizations/vars.tf @@ -0,0 +1,11 @@ +variable "organizations" { + description = "A list of organization names and administrators" + type = list(tuple([string, string])) + default = [] +} + +variable 
"membership" { + description = "A list of organization team members" + type = list(string) + default = [] +} diff --git a/terraform/provider.tf b/terraform/provider.tf new file mode 100644 index 00000000..b21d3b6a --- /dev/null +++ b/terraform/provider.tf @@ -0,0 +1 @@ +provider "aws" {} diff --git a/terraform/terraform.png b/terraform/terraform.png new file mode 100644 index 00000000..f2031e0e Binary files /dev/null and b/terraform/terraform.png differ diff --git a/terraform/vars.tf b/terraform/vars.tf new file mode 100644 index 00000000..abc08793 --- /dev/null +++ b/terraform/vars.tf @@ -0,0 +1,25 @@ +variable "backend_type" { + description = "Type of Terraform backend to provision" + default = "s3" +} + +variable "name" { + description = "Name of the organization" + default = null +} + +variable "admin" { + description = "Email address of the organization admin" + default = null +} + +variable "membership" { + description = "A list of organization team member email addresses" + type = list(string) + default = [] +} + +variable "mfa_delete" { + description = "Enable MFA delete for versioned objects" + default = false +}