From c9dbc96131e2285af7379315ee4a9e00dba02906 Mon Sep 17 00:00:00 2001 From: Bill Monkman Date: Wed, 16 Sep 2020 17:40:29 -0700 Subject: [PATCH 01/10] When creating an EKS cluster, the user who does the creation is assigned special access to be able to connect to the cluster to do the initial setup. This can cause issues with terraform where if another user tries to run the terraform they may not have access to the cluster since they are not the initial user. We were able to work around this in the kubernetes terraform by adding an `exec` block which defined a local command to run to get a token to access the cluster (`aws eks get-token`). This was also not ideal because it depends a lot more on the running user's local k8s setup. The fix: We determined that the user-binding-on-cluster-create behaviour also applies to Roles. This commit has code which adds a role with access to create an EKS cluster, and then uses an AWS provider with an alias to assume that role only while running the EKS module. Unfortunately we had to move the creation of the new role into the bootstrap because of an order-of-operations issue with trying to assume a role in a provider that was created in the same tf run. 
--- templates/Makefile | 1 + .../terraform/modules/kubernetes/provider.tf | 47 ++++++----- .../bootstrap/secrets/eks_creator_user.tf | 80 +++++++++++++++++++ templates/terraform/bootstrap/secrets/main.tf | 13 +-- .../terraform/modules/environment/iam.tf | 11 ++- .../terraform/modules/environment/main.tf | 6 +- .../terraform/modules/environment/provider.tf | 35 ++++++++ 7 files changed, 165 insertions(+), 28 deletions(-) create mode 100644 templates/terraform/bootstrap/secrets/eks_creator_user.tf diff --git a/templates/Makefile b/templates/Makefile index 2436321..423da39 100644 --- a/templates/Makefile +++ b/templates/Makefile @@ -57,6 +57,7 @@ teardown-secrets: aws secretsmanager list-secrets --region <% index .Params `region` %> --query "SecretList[?Tags[?Key=='sendgrid' && Value=='$(PROJECT)']].[Name] | [0][0]" | xargs aws secretsmanager delete-secret --region <% index .Params `region` %> --secret-id && \ aws iam delete-access-key --user-name $(PROJECT)-ci-user --access-key-id $(shell aws iam list-access-keys --user-name $(PROJECT)-ci-user --query "AccessKeyMetadata[0].AccessKeyId" | sed 's/"//g') && \ aws iam delete-user --user-name $(PROJECT)-ci-user + aws iam delete-role --role-name $(PROJECT)-eks-cluster-creator teardown-env: cd terraform/environments/$(ENVIRONMENT) && \ diff --git a/templates/kubernetes/terraform/modules/kubernetes/provider.tf b/templates/kubernetes/terraform/modules/kubernetes/provider.tf index a6dc92e..8e8ebac 100644 --- a/templates/kubernetes/terraform/modules/kubernetes/provider.tf +++ b/templates/kubernetes/terraform/modules/kubernetes/provider.tf @@ -1,27 +1,38 @@ data "aws_caller_identity" "current" {} + +# Created by bootstrap/secrets +data "aws_iam_role" "eks_cluster_creator" { + name = "${var.project}-eks-cluster-creator" +} + +# Used only for EKS creation to tie "cluster creator" to a role instead of the user who runs terraform +# This allows us to rely on credentials pulled from the EKS cluster instead of the user's local kube 
config +provider "aws" { + alias = "for_eks" + + region = var.region + allowed_account_ids = var.allowed_account_ids + + assume_role { + role_arn = data.aws_iam_role.eks_cluster_creator.arn + } +} + data "aws_eks_cluster" "cluster" { - name = var.cluster_name + provider = aws.for_eks + name = module.eks.cluster_id } -data "aws_eks_cluster_auth" "cluster_auth" { - name = data.aws_eks_cluster.cluster.name +data "aws_eks_cluster_auth" "cluster" { + provider = aws.for_eks + name = module.eks.cluster_id } provider "kubernetes" { - ## This is a workaround because aws-eks-cluster-auth will default to us-east-1 - ## leading to an invalid token to access the cluster - exec { - api_version = "client.authentication.k8s.io/v1alpha1" - command = "aws" - args = [ - "eks", - "get-token", - "--region", - var.region, - "--cluster-name", - var.cluster_name, - "--role", - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${var.project}-kubernetes-admin-${var.environment}"] - } + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + token = data.aws_eks_cluster_auth.cluster.token + load_config_file = false + version = "~> 1.11" } diff --git a/templates/terraform/bootstrap/secrets/eks_creator_user.tf b/templates/terraform/bootstrap/secrets/eks_creator_user.tf new file mode 100644 index 0000000..869c55c --- /dev/null +++ b/templates/terraform/bootstrap/secrets/eks_creator_user.tf @@ -0,0 +1,80 @@ + +# +# EKS Cluster Creator Role +# This has to be created first because it is used by the aws provider in the main terraform, so it can't be created by +# that same terraform due to a chicken-and-egg situation. 
+ +# Cluster creator role +resource "aws_iam_role" "eks_cluster_creator" { + name = "${local.project}-eks-cluster-creator" + assume_role_policy = data.aws_iam_policy_document.assumerole_root_only_policy.json + description = "EKS cluster creator role" +} + +# Trust relationship +data "aws_iam_policy_document" "assumerole_root_only_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "AWS" + identifiers = [local.aws_account_id] + } + } +} + +# Attach AWS managed policy for EKS +resource "aws_iam_role_policy_attachment" "eks_cluster_creator_managed" { + role = aws_iam_role.eks_cluster_creator.id + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" +} + +# Attach additional permissions +resource "aws_iam_role_policy" "eks_cluster_creator" { + name = "manage_eks" + role = aws_iam_role.eks_cluster_creator.id + + policy = data.aws_iam_policy_document.eks_manage.json +} + +# Allow the cluster creator role to create a cluster +data "aws_iam_policy_document" "eks_manage" { + statement { + actions = [ + "eks:*", + "ec2:*", + "autoscaling:*", + "iam:CreateOpenIDConnectProvider", + "iam:DeleteOpenIDConnectProvider", + "iam:GetOpenIDConnectProvider", + "iam:ListOpenIDConnectProviders", + "iam:CreateInstanceProfile", + "iam:DeleteInstanceProfile", + "iam:GetInstanceProfile", + "iam:ListInstanceProfiles", + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:ListInstanceProfilesForRole", + ] + resources = ["*"] + } + + statement { + actions = [ + "iam:GetRole", + "iam:PassRole", + "iam:CreateRole", + "iam:DeleteRole", + "iam:TagRole", + "iam:UntagRole", + "iam:AttachRolePolicy", + "iam:DetachRolePolicy", + "iam:ListAttachedRolePolicies", + "iam:ListRolePolicies" + ] + resources = [ + "arn:aws:iam::${local.aws_account_id}:role/${local.project}-*", + "arn:aws:iam::${local.aws_account_id}:role/k8s-${local.project}-*", + ] + } +} diff --git a/templates/terraform/bootstrap/secrets/main.tf 
b/templates/terraform/bootstrap/secrets/main.tf index 67e301c..47170f4 100644 --- a/templates/terraform/bootstrap/secrets/main.tf +++ b/templates/terraform/bootstrap/secrets/main.tf @@ -1,15 +1,16 @@ -provider "aws" { - region = "<% index .Params `region` %>" - allowed_account_ids = [ "<% index .Params `accountId` %>" ] -} +locals { + project = "<% .Name %>" + aws_account_id = "<% index .Params `accountId` %>" +} terraform { required_version = ">= 0.13" } -locals { - project = "<% .Name %>" +provider "aws" { + region = "<% index .Params `region` %>" + allowed_account_ids = [ local.aws_account_id ] } # Create the CI User diff --git a/templates/terraform/modules/environment/iam.tf b/templates/terraform/modules/environment/iam.tf index 35da372..447e98d 100644 --- a/templates/terraform/modules/environment/iam.tf +++ b/templates/terraform/modules/environment/iam.tf @@ -1,4 +1,6 @@ -# @TODO - sort out creating only a single user but multiple roles per env + +# +# Kubernetes admin role # Create KubernetesAdmin role for aws-iam-authenticator resource "aws_iam_role" "kubernetes_admin_role" { @@ -14,7 +16,7 @@ data "aws_iam_policy_document" "assumerole_root_policy" { principals { type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + identifiers = [ data.aws_caller_identity.current.account_id ] } } @@ -29,12 +31,15 @@ data "aws_iam_policy_document" "assumerole_root_policy" { } } + +# +# CI User + resource "aws_iam_user_policy_attachment" "circleci_ecr_access" { user = data.aws_iam_user.ci_user.user_name policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser" } - # Allow the CI user to list and describe clusters data "aws_iam_policy_document" "eks_list_and_describe" { statement { diff --git a/templates/terraform/modules/environment/main.tf b/templates/terraform/modules/environment/main.tf index 05e1577..ab8f6c7 100644 --- a/templates/terraform/modules/environment/main.tf +++ 
b/templates/terraform/modules/environment/main.tf @@ -17,17 +17,21 @@ module "vpc" { environment = var.environment region = var.region kubernetes_cluster_name = local.kubernetes_cluster_name - single_nat_gateway = var.vpc_use_single_nat_gateway + single_nat_gateway = var.vpc_use_single_nat_gateway } # To get the current account id data "aws_caller_identity" "current" {} + # # Provision the EKS cluster module "eks" { source = "commitdev/zero/aws//modules/eks" version = "0.0.2" + providers = { + aws = aws.for_eks + } project = var.project environment = var.environment diff --git a/templates/terraform/modules/environment/provider.tf b/templates/terraform/modules/environment/provider.tf index 8072c85..9e77619 100644 --- a/templates/terraform/modules/environment/provider.tf +++ b/templates/terraform/modules/environment/provider.tf @@ -3,3 +3,38 @@ provider "aws" { allowed_account_ids = var.allowed_account_ids } +# Created by bootstrap/secrets +data "aws_iam_role" "eks_cluster_creator" { + name = "${var.project}-eks-cluster-creator" +} + +# Used only for EKS creation to tie "cluster creator" to a role instead of the user who runs terraform +# This allows us to rely on credentials pulled from the EKS cluster instead of the user's local kube config +provider "aws" { + alias = "for_eks" + + region = var.region + allowed_account_ids = var.allowed_account_ids + + assume_role { + role_arn = data.aws_iam_role.eks_cluster_creator.arn + } +} + +data "aws_eks_cluster" "cluster" { + provider = aws.for_eks + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + provider = aws.for_eks + name = module.eks.cluster_id +} + +provider "kubernetes" { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + token = data.aws_eks_cluster_auth.cluster.token + load_config_file = false + version = "~> 1.11" +} From 8bb29bfb320deb321fa9705824852cb3b067eab7 Mon Sep 17 00:00:00 2001 
From: Bill Monkman Date: Wed, 16 Sep 2020 17:57:03 -0700 Subject: [PATCH 02/10] Referred to the wrong var for cluster name --- templates/kubernetes/terraform/modules/kubernetes/provider.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/kubernetes/terraform/modules/kubernetes/provider.tf b/templates/kubernetes/terraform/modules/kubernetes/provider.tf index 8e8ebac..693194f 100644 --- a/templates/kubernetes/terraform/modules/kubernetes/provider.tf +++ b/templates/kubernetes/terraform/modules/kubernetes/provider.tf @@ -21,12 +21,12 @@ provider "aws" { data "aws_eks_cluster" "cluster" { provider = aws.for_eks - name = module.eks.cluster_id + name = var.cluster_name } data "aws_eks_cluster_auth" "cluster" { provider = aws.for_eks - name = module.eks.cluster_id + name = var.cluster_name } provider "kubernetes" { From 710742beaa53fe957a8f81e32722d3841f393443 Mon Sep 17 00:00:00 2001 From: Bill Monkman Date: Wed, 16 Sep 2020 18:18:55 -0700 Subject: [PATCH 03/10] Added && to chain deletion in makefile, hopefully we can change this soon so it's not necessary --- templates/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/Makefile b/templates/Makefile index 423da39..6330b55 100644 --- a/templates/Makefile +++ b/templates/Makefile @@ -56,7 +56,7 @@ teardown-secrets: aws secretsmanager list-secrets --region <% index .Params `region` %> --query "SecretList[?Tags[?Key=='rds' && Value=='$(PROJECT)-$(ENVIRONMENT)']].[Name] | [0][0]" | xargs aws secretsmanager delete-secret --region <% index .Params `region` %> --secret-id && \ aws secretsmanager list-secrets --region <% index .Params `region` %> --query "SecretList[?Tags[?Key=='sendgrid' && Value=='$(PROJECT)']].[Name] | [0][0]" | xargs aws secretsmanager delete-secret --region <% index .Params `region` %> --secret-id && \ aws iam delete-access-key --user-name $(PROJECT)-ci-user --access-key-id $(shell aws iam list-access-keys --user-name $(PROJECT)-ci-user --query 
"AccessKeyMetadata[0].AccessKeyId" | sed 's/"//g') && \ - aws iam delete-user --user-name $(PROJECT)-ci-user + aws iam delete-user --user-name $(PROJECT)-ci-user && \ aws iam delete-role --role-name $(PROJECT)-eks-cluster-creator teardown-env: From b0df4893c9afabc6d6c35f6d2b9a540fe3aa7d55 Mon Sep 17 00:00:00 2001 From: Bill Monkman Date: Thu, 17 Sep 2020 12:44:11 -0700 Subject: [PATCH 04/10] Fixed reference to allowed_account_ids on k8s side, ran tf fmt --- .../terraform/environments/dev/main.tf | 13 ++-- .../terraform/environments/prod/main.tf | 13 +++- .../terraform/environments/stage/main.tf | 14 ++-- .../terraform/modules/kubernetes/provider.tf | 4 +- .../terraform/modules/kubernetes/variables.tf | 5 ++ templates/terraform/bootstrap/secrets/main.tf | 36 ++++----- .../terraform/modules/environment/iam.tf | 10 +-- .../terraform/modules/environment/main.tf | 74 +++++++++---------- .../terraform/modules/environment/provider.tf | 4 +- 9 files changed, 95 insertions(+), 78 deletions(-) diff --git a/templates/kubernetes/terraform/environments/dev/main.tf b/templates/kubernetes/terraform/environments/dev/main.tf index 4c7e865..98a0db5 100644 --- a/templates/kubernetes/terraform/environments/dev/main.tf +++ b/templates/kubernetes/terraform/environments/dev/main.tf @@ -14,9 +14,10 @@ module "kubernetes" { project = "<% .Name %>" - environment = "dev" - region = "<% index .Params `region` %>" - random_seed = "<% index .Params `randomSeed` %>" + environment = "dev" + region = "<% index .Params `region` %>" + allowed_account_ids = ["<% index .Params `accountId` %>"] + random_seed = "<% index .Params `randomSeed` %>" # Authenticate with the EKS cluster via the cluster id cluster_name = "<% .Name %>-dev-<% index .Params `region` %>" @@ -30,14 +31,16 @@ module "kubernetes" { # Logging configuration logging_type = "<% index .Params `loggingType` %>" - # Application policy list + # Application policy list - This allows applications running in kubernetes to have access to AWS 
resources. + # Specify the service account name, the namespace, and the policy that should be applied. + # This makes use of IRSA: https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/ application_policy_list = [ { service_account = "backend-service" namespace = "<% .Name %>" policy = data.aws_iam_policy_document.resource_access_backendservice } - # could be more policies defined here (if have) + # Add additional mappings here ] # Wireguard configuration diff --git a/templates/kubernetes/terraform/environments/prod/main.tf b/templates/kubernetes/terraform/environments/prod/main.tf index 4a1d16e..7ed3e3e 100644 --- a/templates/kubernetes/terraform/environments/prod/main.tf +++ b/templates/kubernetes/terraform/environments/prod/main.tf @@ -18,8 +18,10 @@ module "kubernetes" { project = "<% .Name %>" - environment = "prod" - region = "<% index .Params `region` %>" + environment = "prod" + region = "<% index .Params `region` %>" + allowed_account_ids = ["<% index .Params `accountId` %>"] + random_seed = "<% index .Params `randomSeed` %>" # Authenticate with the EKS cluster via the cluster id cluster_name = "<% .Name %>-prod-<% index .Params `region` %>" @@ -33,16 +35,19 @@ module "kubernetes" { # Logging configuration logging_type = "<% index .Params `loggingType` %>" - # Application policy list + # Application policy list - This allows applications running in kubernetes to have access to AWS resources. + # Specify the service account name, the namespace, and the policy that should be applied. 
+ # This makes use of IRSA: https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/ application_policy_list = [ { service_account = "backend-service" namespace = "<% .Name %>" policy = data.aws_iam_policy_document.resource_access_backendservice } - # could be more policies defined here (if have) + # Add additional mappings here ] + # Wireguard configuration vpn_server_address = "10.10.99.0/24" vpn_client_publickeys = [ diff --git a/templates/kubernetes/terraform/environments/stage/main.tf b/templates/kubernetes/terraform/environments/stage/main.tf index 5369e94..b369fd9 100644 --- a/templates/kubernetes/terraform/environments/stage/main.tf +++ b/templates/kubernetes/terraform/environments/stage/main.tf @@ -18,9 +18,10 @@ module "kubernetes" { project = "<% .Name %>" - environment = "stage" - region = "<% index .Params `region` %>" - random_seed = "<% index .Params `randomSeed` %>" + environment = "stage" + region = "<% index .Params `region` %>" + allowed_account_ids = ["<% index .Params `accountId` %>"] + random_seed = "<% index .Params `randomSeed` %>" # Authenticate with the EKS cluster via the cluster id cluster_name = "<% .Name %>-stage-<% index .Params `region` %>" @@ -34,16 +35,19 @@ module "kubernetes" { # Logging configuration logging_type = "<% index .Params `loggingType` %>" - # Application policy list + # Application policy list - This allows applications running in kubernetes to have access to AWS resources. + # Specify the service account name, the namespace, and the policy that should be applied. 
+ # This makes use of IRSA: https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/ application_policy_list = [ { service_account = "backend-service" namespace = "<% .Name %>" policy = data.aws_iam_policy_document.resource_access_backendservice } - # could be more policies defined here (if have) + # Add additional mappings here ] + # Wireguard configuration vpn_server_address = "10.10.199.0/24" vpn_client_publickeys = [ diff --git a/templates/kubernetes/terraform/modules/kubernetes/provider.tf b/templates/kubernetes/terraform/modules/kubernetes/provider.tf index 693194f..ac38021 100644 --- a/templates/kubernetes/terraform/modules/kubernetes/provider.tf +++ b/templates/kubernetes/terraform/modules/kubernetes/provider.tf @@ -21,12 +21,12 @@ provider "aws" { data "aws_eks_cluster" "cluster" { provider = aws.for_eks - name = var.cluster_name + name = var.cluster_name } data "aws_eks_cluster_auth" "cluster" { provider = aws.for_eks - name = var.cluster_name + name = var.cluster_name } provider "kubernetes" { diff --git a/templates/kubernetes/terraform/modules/kubernetes/variables.tf b/templates/kubernetes/terraform/modules/kubernetes/variables.tf index e428ad2..33cc0ab 100644 --- a/templates/kubernetes/terraform/modules/kubernetes/variables.tf +++ b/templates/kubernetes/terraform/modules/kubernetes/variables.tf @@ -6,6 +6,11 @@ variable "project" { description = "The name of the project" } +variable "allowed_account_ids" { + description = "The IDs of AWS accounts for this project, to protect against mistakenly applying to the wrong env" + type = list(string) +} + variable "environment" { description = "Environment" } diff --git a/templates/terraform/bootstrap/secrets/main.tf b/templates/terraform/bootstrap/secrets/main.tf index 47170f4..27b3a5f 100644 --- a/templates/terraform/bootstrap/secrets/main.tf +++ b/templates/terraform/bootstrap/secrets/main.tf @@ -1,6 +1,6 @@ locals { - project = "<% .Name %>" + project = "<% .Name %>" 
aws_account_id = "<% index .Params `accountId` %>" } @@ -10,7 +10,7 @@ terraform { provider "aws" { region = "<% index .Params `region` %>" - allowed_account_ids = [ local.aws_account_id ] + allowed_account_ids = [local.aws_account_id] } # Create the CI User @@ -20,48 +20,48 @@ resource "aws_iam_user" "ci_user" { # Create a keypair to be used by CI systems resource "aws_iam_access_key" "ci_user" { - user = aws_iam_user.ci_user.name + user = aws_iam_user.ci_user.name } # Add the keys to AWS secrets manager module "ci_user_keys" { - source = "commitdev/zero/aws//modules/secret" + source = "commitdev/zero/aws//modules/secret" version = "0.0.2" - name = "ci-user-aws-keys<% index .Params `randomSeed` %>" - type = "map" - values = map("access_key_id", aws_iam_access_key.ci_user.id, "secret_key", aws_iam_access_key.ci_user.secret) - tags = map("project", local.project) + name = "ci-user-aws-keys<% index .Params `randomSeed` %>" + type = "map" + values = map("access_key_id", aws_iam_access_key.ci_user.id, "secret_key", aws_iam_access_key.ci_user.secret) + tags = map("project", local.project) } module "rds_master_secret_stage" { - source = "commitdev/zero/aws//modules/secret" + source = "commitdev/zero/aws//modules/secret" version = "0.0.2" - name = "${local.project}-stage-rds-<% index .Params `randomSeed` %>" + name = "${local.project}-stage-rds-<% index .Params `randomSeed` %>" type = "random" random_length = 32 - tags = map("rds", "${local.project}-stage") + tags = map("rds", "${local.project}-stage") } module "rds_master_secret_prod" { - source = "commitdev/zero/aws//modules/secret" + source = "commitdev/zero/aws//modules/secret" version = "0.0.2" - name = "${local.project}-prod-rds-<% index .Params `randomSeed` %>" + name = "${local.project}-prod-rds-<% index .Params `randomSeed` %>" type = "random" random_length = 32 - tags = map("rds", "${local.project}-prod") + tags = map("rds", "${local.project}-prod") } module "sendgrid_api_key" { - count = <%if eq (index .Params 
`sendgridApiKey`) "" %>0<% else %>1<% end %> - source = "commitdev/zero/aws//modules/secret" + count = <%if eq (index .Params `sendgridApiKey`) "" %>0<% else %>1<% end %> + source = "commitdev/zero/aws//modules/secret" version = "0.0.2" - name = "${local.project}-sendgrid-<% index .Params `randomSeed` %>" + name = "${local.project}-sendgrid-<% index .Params `randomSeed` %>" type = "string" value = "<% index .Params `sendgridApiKey` %>" - tags = map("sendgrid", local.project) + tags = map("sendgrid", local.project) } diff --git a/templates/terraform/modules/environment/iam.tf b/templates/terraform/modules/environment/iam.tf index 447e98d..14fd75f 100644 --- a/templates/terraform/modules/environment/iam.tf +++ b/templates/terraform/modules/environment/iam.tf @@ -16,7 +16,7 @@ data "aws_iam_policy_document" "assumerole_root_policy" { principals { type = "AWS" - identifiers = [ data.aws_caller_identity.current.account_id ] + identifiers = [data.aws_caller_identity.current.account_id] } } @@ -55,9 +55,9 @@ data "aws_iam_policy_document" "eks_list_and_describe" { } resource "aws_iam_policy" "eks_list_and_describe_policy" { - name_prefix = "eks-list-and-describe" + name_prefix = "eks-list-and-describe" description = "Policy to allow listing and describing EKS clusters for ${var.project} ${var.environment}" - policy = data.aws_iam_policy_document.eks_list_and_describe.json + policy = data.aws_iam_policy_document.eks_list_and_describe.json } resource "aws_iam_user_policy_attachment" "ci_user_list_and_describe_policy" { @@ -101,9 +101,9 @@ data "aws_iam_policy_document" "deploy_assets_policy" { } resource "aws_iam_policy" "deploy_assets_policy" { - name_prefix = "ci-deploy-assets" + name_prefix = "ci-deploy-assets" description = "Policy to allow a CI user to deploy assets for ${var.project} ${var.environment}" - policy = data.aws_iam_policy_document.deploy_assets_policy.json + policy = data.aws_iam_policy_document.deploy_assets_policy.json } resource 
"aws_iam_user_policy_attachment" "ci_s3_policy" { diff --git a/templates/terraform/modules/environment/main.tf b/templates/terraform/modules/environment/main.tf index ab8f6c7..cc0eede 100644 --- a/templates/terraform/modules/environment/main.tf +++ b/templates/terraform/modules/environment/main.tf @@ -5,12 +5,12 @@ locals { } data "aws_iam_user" "ci_user" { - user_name = "${var.project}-ci-user" # Should have been created in the bootstrap process + user_name = "${var.project}-ci-user" # Should have been created in the bootstrap process } module "vpc" { - source = "commitdev/zero/aws//modules/vpc" + source = "commitdev/zero/aws//modules/vpc" version = "0.0.1" project = var.project @@ -27,21 +27,21 @@ data "aws_caller_identity" "current" {} # # Provision the EKS cluster module "eks" { - source = "commitdev/zero/aws//modules/eks" + source = "commitdev/zero/aws//modules/eks" version = "0.0.2" providers = { aws = aws.for_eks } - project = var.project - environment = var.environment - cluster_name = local.kubernetes_cluster_name - cluster_version = var.eks_cluster_version + project = var.project + environment = var.environment + cluster_name = local.kubernetes_cluster_name + cluster_version = var.eks_cluster_version - iam_account_id = data.aws_caller_identity.current.account_id + iam_account_id = data.aws_caller_identity.current.account_id - private_subnets = module.vpc.private_subnets - vpc_id = module.vpc.vpc_id + private_subnets = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id worker_instance_type = var.eks_worker_instance_type worker_asg_min_size = var.eks_worker_asg_min_size @@ -51,40 +51,40 @@ module "eks" { module "wildcard_domain" { - source = "commitdev/zero/aws//modules/certificate" + source = "commitdev/zero/aws//modules/certificate" version = "0.0.1" - region = var.region - zone_name = var.domain_name - domain_names = ["*.${var.domain_name}"] + region = var.region + zone_name = var.domain_name + domain_names = ["*.${var.domain_name}"] } module 
"assets_domains" { - source = "commitdev/zero/aws//modules/certificate" + source = "commitdev/zero/aws//modules/certificate" version = "0.0.1" - region = "us-east-1" # For CF, the cert must be in us-east-1 - zone_name = var.domain_name - domain_names = var.s3_hosting_buckets + region = "us-east-1" # For CF, the cert must be in us-east-1 + zone_name = var.domain_name + domain_names = var.s3_hosting_buckets } module "s3_hosting" { - source = "commitdev/zero/aws//modules/s3_hosting" + source = "commitdev/zero/aws//modules/s3_hosting" version = "0.0.3" # We need to wait for certificate validation to complete before using the certs depends_on = [module.assets_domains.certificate_validations] - cf_signed_downloads = var.cf_signed_downloads - buckets = var.s3_hosting_buckets - project = var.project - environment = var.environment - certificate_arns = module.assets_domains.certificate_arns - route53_zone_id = module.assets_domains.route53_zone_id + cf_signed_downloads = var.cf_signed_downloads + buckets = var.s3_hosting_buckets + project = var.project + environment = var.environment + certificate_arns = module.assets_domains.certificate_arns + route53_zone_id = module.assets_domains.route53_zone_id } module "db" { - source = "commitdev/zero/aws//modules/database" + source = "commitdev/zero/aws//modules/database" version = "0.0.1" project = var.project @@ -98,26 +98,26 @@ module "db" { } module "ecr" { - source = "commitdev/zero/aws//modules/ecr" + source = "commitdev/zero/aws//modules/ecr" version = "0.0.1" - environment = var.environment - ecr_repositories = var.ecr_repositories - ecr_principals = [data.aws_iam_user.ci_user.arn] + environment = var.environment + ecr_repositories = var.ecr_repositories + ecr_principals = [data.aws_iam_user.ci_user.arn] } module "logging" { - source = "commitdev/zero/aws//modules/logging" + source = "commitdev/zero/aws//modules/logging" version = "0.0.1" - count = var.logging_type == "kibana" ? 1 : 0 + count = var.logging_type == "kibana" ? 
1 : 0 project = var.project environment = var.environment vpc_id = module.vpc.vpc_id elasticsearch_version = var.logging_es_version - security_groups = [module.eks.worker_security_group_id] # TODO : Add vpn SG when available - subnet_ids = slice(module.vpc.private_subnets.*, 1, (1+var.logging_az_count)) # We will use 2 subnets + security_groups = [module.eks.worker_security_group_id] # TODO : Add vpn SG when available + subnet_ids = slice(module.vpc.private_subnets.*, 1, (1 + var.logging_az_count)) # We will use 2 subnets instance_type = var.logging_es_instance_type instance_count = var.logging_es_instance_count ebs_volume_size_in_gb = var.logging_volume_size_in_gb @@ -125,10 +125,10 @@ module "logging" { } module "sendgrid" { - source = "commitdev/zero/aws//modules/sendgrid" + source = "commitdev/zero/aws//modules/sendgrid" version = "0.0.2" - count = var.sendgrid_enabled ? 1 : 0 + count = var.sendgrid_enabled ? 1 : 0 - zone_name = var.domain_name + zone_name = var.domain_name sendgrid_api_key_secret_name = var.sendgrid_api_key_secret_name } diff --git a/templates/terraform/modules/environment/provider.tf b/templates/terraform/modules/environment/provider.tf index 9e77619..96e8adb 100644 --- a/templates/terraform/modules/environment/provider.tf +++ b/templates/terraform/modules/environment/provider.tf @@ -23,12 +23,12 @@ provider "aws" { data "aws_eks_cluster" "cluster" { provider = aws.for_eks - name = module.eks.cluster_id + name = module.eks.cluster_id } data "aws_eks_cluster_auth" "cluster" { provider = aws.for_eks - name = module.eks.cluster_id + name = module.eks.cluster_id } provider "kubernetes" { From fa97aa59e2cd59b40f06cf611ae6f09b2f56dccb Mon Sep 17 00:00:00 2001 From: Steven Shi Date: Thu, 17 Sep 2020 13:09:36 -0700 Subject: [PATCH 05/10] fixed a typo... 
--- .../kubernetes/terraform/modules/kubernetes/backend_service.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/kubernetes/terraform/modules/kubernetes/backend_service.tf b/templates/kubernetes/terraform/modules/kubernetes/backend_service.tf index 3c6e9f7..2d3ffef 100644 --- a/templates/kubernetes/terraform/modules/kubernetes/backend_service.tf +++ b/templates/kubernetes/terraform/modules/kubernetes/backend_service.tf @@ -1,5 +1,5 @@ data "aws_secretsmanager_secret" "cf_keypair" { - name = "{var.project}_cf_keypair" + name = "${var.project}_cf_keypair" } data "aws_secretsmanager_secret_version" "cf_keypair" { From d3f2d7733a2779170d36f77a046506cea9ebe4e8 Mon Sep 17 00:00:00 2001 From: Steven Shi Date: Thu, 17 Sep 2020 13:11:34 -0700 Subject: [PATCH 06/10] remove unnecessary file --- .../terraform/modules/kubernetes/files/wireguard-peer-csv.tpl | 1 - 1 file changed, 1 deletion(-) delete mode 100644 templates/kubernetes/terraform/modules/kubernetes/files/wireguard-peer-csv.tpl diff --git a/templates/kubernetes/terraform/modules/kubernetes/files/wireguard-peer-csv.tpl b/templates/kubernetes/terraform/modules/kubernetes/files/wireguard-peer-csv.tpl deleted file mode 100644 index 3e071c4..0000000 --- a/templates/kubernetes/terraform/modules/kubernetes/files/wireguard-peer-csv.tpl +++ /dev/null @@ -1 +0,0 @@ -${tpl_client_name}|${tpl_client_ip}|${tpl_client_pub_key} From e94c6096625529427a1ef948591933224663acfb Mon Sep 17 00:00:00 2001 From: Steven Shi Date: Thu, 17 Sep 2020 13:58:40 -0700 Subject: [PATCH 07/10] remove json decode for vpn key --- templates/kubernetes/terraform/modules/kubernetes/vpn.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/kubernetes/terraform/modules/kubernetes/vpn.tf b/templates/kubernetes/terraform/modules/kubernetes/vpn.tf index 79b1fcd..1c5515f 100644 --- a/templates/kubernetes/terraform/modules/kubernetes/vpn.tf +++ b/templates/kubernetes/terraform/modules/kubernetes/vpn.tf @@ -69,7
+69,7 @@ resource "kubernetes_secret" "vpn_private_key" { } data = { - privatekey = jsondecode(data.aws_secretsmanager_secret_version.vpn_private_key.secret_string)["key"] + privatekey = data.aws_secretsmanager_secret_version.vpn_private_key.secret_string } type = "Opaque" From a95c758579bd096c47e89596186d6ca0d056e128 Mon Sep 17 00:00:00 2001 From: Bill Monkman Date: Thu, 17 Sep 2020 14:56:04 -0700 Subject: [PATCH 08/10] Fixed missing region in pre-k8s make target --- templates/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/Makefile b/templates/Makefile index 6330b55..c9828c1 100644 --- a/templates/Makefile +++ b/templates/Makefile @@ -26,7 +26,7 @@ apply-env: pre-k8s: @echo "Creating VPN private key..." WGKEY=$(shell kubectl run -i --tty zero-k8s-utilities --image=commitdev/zero-k8s-utilities:0.0.3 --restart=Never -- wg genkey) && kubectl delete pod/zero-k8s-utilities && \ - aws secretsmanager create-secret --name $(PROJECT)-$(ENVIRONMENT)-vpn-wg-privatekey-<% index .Params `randomSeed` %> --description "Auto-generated Wireguard VPN private key" --secret-string $$WGKEY + aws secretsmanager create-secret --region <% index .Params `region` %> --name $(PROJECT)-$(ENVIRONMENT)-vpn-wg-privatekey-<% index .Params `randomSeed` %> --description "Auto-generated Wireguard VPN private key" --secret-string $$WGKEY @echo "Done VPN private key creation" apply-k8s-utils: From 47ee2436f9e960bf9e67258cce061951e4cc4eb7 Mon Sep 17 00:00:00 2001 From: Bill Monkman Date: Thu, 17 Sep 2020 15:05:07 -0700 Subject: [PATCH 09/10] Changed vpn namespace references to ensure dependencies --- templates/kubernetes/terraform/modules/kubernetes/vpn.tf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/templates/kubernetes/terraform/modules/kubernetes/vpn.tf b/templates/kubernetes/terraform/modules/kubernetes/vpn.tf index 1c5515f..079547b 100644 --- a/templates/kubernetes/terraform/modules/kubernetes/vpn.tf +++ 
b/templates/kubernetes/terraform/modules/kubernetes/vpn.tf @@ -65,7 +65,7 @@ resource "kubernetes_namespace" "vpn_namespace" { resource "kubernetes_secret" "vpn_private_key" { metadata { name = "wg-secret" - namespace = local.namespace + namespace = kubernetes_namespace.vpn_namespace.metadata[0].name } data = { @@ -78,7 +78,7 @@ resource "kubernetes_secret" "vpn_private_key" { resource "kubernetes_config_map" "vpn_configmap" { metadata { name = "wg-configmap" - namespace = local.namespace + namespace = kubernetes_namespace.vpn_namespace.metadata[0].name } data = { @@ -89,7 +89,7 @@ resource "kubernetes_config_map" "vpn_configmap" { resource "kubernetes_service" "wireguard" { metadata { name = "wireguard" - namespace = local.namespace + namespace = kubernetes_namespace.vpn_namespace.metadata[0].name labels = { app = "wireguard" @@ -121,7 +121,7 @@ resource "kubernetes_service" "wireguard" { resource "kubernetes_deployment" "wireguard" { metadata { name = "wireguard" - namespace = local.namespace + namespace = kubernetes_namespace.vpn_namespace.metadata[0].name } spec { From 5b5c0c1577e5afa70b44997f3118f463a87c0913 Mon Sep 17 00:00:00 2001 From: Bill Monkman Date: Thu, 17 Sep 2020 15:26:43 -0700 Subject: [PATCH 10/10] Make sure there is an aws provider at the root of each environment --- templates/kubernetes/terraform/environments/dev/main.tf | 5 +++++ templates/kubernetes/terraform/environments/prod/main.tf | 4 +++- templates/terraform/environments/prod/main.tf | 5 +++++ templates/terraform/environments/stage/main.tf | 5 +++++ templates/terraform/modules/environment/provider.tf | 5 ----- 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/templates/kubernetes/terraform/environments/dev/main.tf b/templates/kubernetes/terraform/environments/dev/main.tf index 98a0db5..47113af 100644 --- a/templates/kubernetes/terraform/environments/dev/main.tf +++ b/templates/kubernetes/terraform/environments/dev/main.tf @@ -8,6 +8,11 @@ terraform { } } +provider "aws" { + 
region = "<% index .Params `region` %>" + allowed_account_ids = ["<% index .Params `accountId` %>"] +} + # Provision kubernetes resources required to run services/applications module "kubernetes" { source = "../../modules/kubernetes" diff --git a/templates/kubernetes/terraform/environments/prod/main.tf b/templates/kubernetes/terraform/environments/prod/main.tf index 7ed3e3e..11ecbeb 100644 --- a/templates/kubernetes/terraform/environments/prod/main.tf +++ b/templates/kubernetes/terraform/environments/prod/main.tf @@ -9,9 +9,11 @@ terraform { } provider "aws" { - region = "<% index .Params `region` %>" + region = "<% index .Params `region` %>" + allowed_account_ids = ["<% index .Params `accountId` %>"] } + # Provision kubernetes resources required to run services/applications module "kubernetes" { source = "../../modules/kubernetes" diff --git a/templates/terraform/environments/prod/main.tf b/templates/terraform/environments/prod/main.tf index e3ee6a0..2840ef3 100644 --- a/templates/terraform/environments/prod/main.tf +++ b/templates/terraform/environments/prod/main.tf @@ -9,6 +9,11 @@ terraform { } } +provider "aws" { + region = "<% index .Params `region` %>" + allowed_account_ids = ["<% index .Params `accountId` %>"] +} + # Instantiate the production environment module "prod" { source = "../../modules/environment" diff --git a/templates/terraform/environments/stage/main.tf b/templates/terraform/environments/stage/main.tf index b51d3b1..8b19b1e 100644 --- a/templates/terraform/environments/stage/main.tf +++ b/templates/terraform/environments/stage/main.tf @@ -9,6 +9,11 @@ terraform { } } +provider "aws" { + region = "<% index .Params `region` %>" + allowed_account_ids = ["<% index .Params `accountId` %>"] +} + # Instantiate the staging environment module "stage" { source = "../../modules/environment" diff --git a/templates/terraform/modules/environment/provider.tf b/templates/terraform/modules/environment/provider.tf index 96e8adb..8d5cbbe 100644 --- 
a/templates/terraform/modules/environment/provider.tf +++ b/templates/terraform/modules/environment/provider.tf @@ -1,8 +1,3 @@ -provider "aws" { - region = var.region - allowed_account_ids = var.allowed_account_ids -} - # Created by bootstrap/secrets data "aws_iam_role" "eks_cluster_creator" { name = "${var.project}-eks-cluster-creator"