diff --git a/templates/Makefile b/templates/Makefile index bfd93da..5ec2f47 100644 --- a/templates/Makefile +++ b/templates/Makefile @@ -1,11 +1,12 @@ SHELL = /usr/bin/env bash ENVIRONMENT ?= stage PROJECT = <% .Name %> +ROLE ?= admin export AWS_DEFAULT_REGION = <% index .Params `region` %> export AWS_PAGER = KUBE_CONTEXT := $(PROJECT)-$(ENVIRONMENT)-$(AWS_DEFAULT_REGION) -apply: apply-remote-state apply-secrets apply-env update-k8s-conf pre-k8s apply-k8s-utils post-apply-setup +apply: apply-remote-state apply-shared-remote-state apply-secrets apply-shared-env apply-env update-k8s-conf pre-k8s apply-k8s-utils post-apply-setup apply-remote-state: aws s3 ls $(PROJECT)-$(ENVIRONMENT)-terraform-state > /dev/null 2>&1 || ( \ @@ -14,6 +15,13 @@ apply-remote-state: terraform apply -var "environment=$(ENVIRONMENT)" $(AUTO_APPROVE) && \ rm ./terraform.tfstate ) +apply-shared-remote-state: + aws s3 ls $(PROJECT)-shared-terraform-state > /dev/null 2>&1 || ( \ + cd terraform/bootstrap/remote-state && \ + terraform init && \ + terraform apply -var "environment=shared" $(AUTO_APPROVE) && \ + rm ./terraform.tfstate ) + apply-secrets: aws iam list-access-keys --user-name $(PROJECT)-ci-user > /dev/null 2>&1 || ( \ cd terraform/bootstrap/secrets && \ @@ -21,6 +29,11 @@ apply-secrets: terraform apply $(AUTO_APPROVE) && \ rm ./terraform.tfstate ) +apply-shared-env: + cd terraform/environments/shared; \ + terraform init && \ + terraform apply $(AUTO_APPROVE) + apply-env: cd terraform/environments/$(ENVIRONMENT); \ terraform init && \ @@ -40,12 +53,12 @@ apply-k8s-utils: terraform apply $(AUTO_APPROVE) update-k8s-conf: - aws eks --region $(AWS_DEFAULT_REGION) update-kubeconfig --role "arn:aws:iam::<% index .Params `accountId` %>:role/$(PROJECT)-kubernetes-admin-$(ENVIRONMENT)" --name $(KUBE_CONTEXT) --alias $(KUBE_CONTEXT) + aws eks --region $(AWS_DEFAULT_REGION) update-kubeconfig --role "arn:aws:iam::<% index .Params `accountId` %>:role/$(PROJECT)-kubernetes-$(ROLE)-$(ENVIRONMENT)" 
--name $(KUBE_CONTEXT) --alias $(KUBE_CONTEXT) post-apply-setup: cd scripts && ENVIRONMENT=$(ENVIRONMENT) PROJECT=$(PROJECT) sh post-apply.sh -teardown: teardown-k8s-utils teardown-env teardown-secrets teardown-remote-state +teardown: teardown-k8s-utils teardown-env teardown-shared-env teardown-secrets teardown-remote-state teardown-shared-remote-state teardown-remote-state: @echo "Deleting remote state is not reversible, are you sure you want to delete the resources? [y/N]:" ; read ans ; [ $${ans:-N} == "y" ] || exit 1 @@ -54,6 +67,13 @@ teardown-remote-state: # TODO : This doesn't work because bucket versioning is enabled, we would need to loop through all versions of files and delete them manually aws s3 rb s3://$(PROJECT)-$(ENVIRONMENT)-terraform-state --force +teardown-shared-remote-state: + @echo "Deleting shared remote state is not reversible, are you sure you want to delete the resources? [y/N]:" ; read ans ; [ $${ans:-N} == "y" ] || exit 1 + aws dynamodb delete-table --region $(AWS_DEFAULT_REGION) --table-name $(PROJECT)-shared-terraform-state-locks + aws s3 rm s3://$(PROJECT)-shared-terraform-state --recursive + # TODO : This doesn't work because bucket versioning is enabled, we would need to loop through all versions of files and delete them manually + aws s3 rb s3://$(PROJECT)-shared-terraform-state --force + teardown-secrets: @echo "Deleting secrets is not reversible, are you sure you want to delete the secrets? 
[y/N]:" ; read ans ; [ $${ans:-N} == "y" ] || exit 1 aws secretsmanager list-secrets --region $(AWS_DEFAULT_REGION) --query "SecretList[?Tags[?Key=='project' && Value=='$(PROJECT)']].[Name] | [0][0]" | xargs aws secretsmanager delete-secret --region $(AWS_DEFAULT_REGION) --secret-id || echo "Secret already removed" @@ -69,8 +89,12 @@ teardown-env: cd terraform/environments/$(ENVIRONMENT) && \ terraform destroy +teardown-shared-env: + cd terraform/environments/shared && \ + terraform destroy + teardown-k8s-utils: cd kubernetes/terraform/environments/$(ENVIRONMENT) && \ terraform destroy -.PHONY: apply apply-remote-state apply-secrets apply-env apply-k8s-utils teardown-k8s-utils teardown-env teardown-secrets teardown-remote-state +.PHONY: apply apply-remote-state apply-secrets apply-env apply-k8s-utils teardown-k8s-utils teardown-env teardown-shared-env teardown-secrets teardown-remote-state teardown-shared-remote-state diff --git a/templates/terraform/README.md b/templates/terraform/README.md index 5c38195..07351a8 100644 --- a/templates/terraform/README.md +++ b/templates/terraform/README.md @@ -102,6 +102,26 @@ make update-k8s-conf ``` +If a user has a role other than admin (dev, operations, etc.) they can specify it here as well: +``` + ROLE= make update-k8s-conf + ``` + +## User Access + +You may want to give memebers of your team access to the infrastructure. +Individual roles and permissions are defined in `environments//user_access.tf`, these will define the amount of access a user in that role has to both AWS and Kubernetes. + + 1. Add users in `environments/shared/main.tf` and specify the role they should have in each environment, then run: +``` +make apply-shared-env +``` + + 2. To do the assignment of users to roles in each environment, you must run this for each: +``` +ENVIRONENT= make apply-env +``` +This should detect that there was a new user created, and put them into the necessary group. 
## Upgrading an EKS Cluster diff --git a/templates/terraform/environments/prod/main.tf b/templates/terraform/environments/prod/main.tf index d461f7f..3b8ee43 100644 --- a/templates/terraform/environments/prod/main.tf +++ b/templates/terraform/environments/prod/main.tf @@ -9,9 +9,28 @@ terraform { } } +locals { + project = "<% .Name %>" + region = "<% index .Params `region` %>" + account_id = "<% index .Params `accountId` %>" + domain_name = "<% index .Params `productionHostRoot` %>" +} + provider "aws" { - region = "<% index .Params `region` %>" - allowed_account_ids = ["<% index .Params `accountId` %>"] + region = local.region + allowed_account_ids = [local.account_id] +} + +# remote state of "shared" +data "terraform_remote_state" "shared" { + backend = "s3" + config = { + bucket = "${local.project}-shared-terraform-state" + key = "infrastructure/terraform/environments/shared/main" + region = local.region + encrypt = true + dynamodb_table = "${local.project}-shared-terraform-state-locks" + } } # Instantiate the production environment @@ -20,9 +39,9 @@ module "prod" { environment = "prod" # Project configuration - project = "<% .Name %>" - region = "<% index .Params `region` %>" - allowed_account_ids = ["<% index .Params `accountId` %>"] + project = local.project + region = local.region + allowed_account_ids = [local.account_id] random_seed = "<% index .Params `randomSeed` %>" # ECR configuration @@ -35,15 +54,15 @@ module "prod" { eks_worker_asg_max_size = 4 # EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html - # https://<% index .Params `region` %>.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.17%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=<% index .Params `region` %> + # 
https://${local.region}.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.17%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=${local.region} eks_worker_ami = "<% index .Params `eksWorkerAMI` %>" # Hosting configuration. Each domain will have a bucket created for it, but may have mulitple aliases pointing to the same bucket. hosted_domains = [ - { domain : "<% index .Params `productionHostRoot` %>", aliases : [] }, - { domain : "<% index .Params `productionFrontendSubdomain` %><% index .Params `productionHostRoot` %>", aliases : [] }, + { domain : local.domain_name, aliases : [] }, + { domain : "<% index .Params `productionFrontendSubdomain` %>${local.domain_name}", aliases : [] }, ] - domain_name = "<% index .Params `productionHostRoot` %>" + domain_name = "${local.domain_name}" cf_signed_downloads = <% if eq (index .Params `fileUploads`) "yes" %>true<% else %>false<% end %> # DB configuration @@ -61,5 +80,21 @@ module "prod" { # See https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limits.html sendgrid_enabled = <%if eq (index .Params `sendgridApiKey`) "" %>false<% else %>true<% end %> - sendgrid_api_key_secret_name = "<% .Name %>-sendgrid-<% index .Params `randomSeed` %>" + sendgrid_api_key_secret_name = "${local.project}-sendgrid-<% index .Params `randomSeed` %>" + + # Roles configuration + roles = [ + { + name = "developer" + aws_policy = data.aws_iam_policy_document.developer_access.json + k8s_policies = local.k8s_developer_access + }, + { + name = "operator" + aws_policy = data.aws_iam_policy_document.operator_access.json + k8s_policies = local.k8s_operator_access + } + ] + + user_role_mapping = data.terraform_remote_state.shared.outputs.user_role_mapping } diff --git a/templates/terraform/environments/prod/user_access.tf b/templates/terraform/environments/prod/user_access.tf new file mode 100644 index 0000000..0d91aa8 --- /dev/null +++ 
b/templates/terraform/environments/prod/user_access.tf @@ -0,0 +1,101 @@ +# define AWS policy documents for developer +data "aws_iam_policy_document" "developer_access" { + # EKS + statement { + effect = "Allow" + actions = ["eks:ListClusters"] + resources = ["*"] + } + statement { + effect = "Allow" + actions = ["eks:DescribeCluster"] + resources = ["arn:aws:eks:${local.region}:${local.account_id}:cluster/${local.project}-prod*"] + } + + # ECR + statement { + effect = "Allow" + actions = [ + "ecr:DescribeImages", + "ecr:DescribeRepositories" + ] + resources = ["*"] + } + + # S3 + statement { + effect = "Allow" + actions = ["s3:ListBucket"] + resources = ["arn:aws:s3:::*${local.domain_name}"] + } + statement { + effect = "Allow" + actions = ["s3:GetObject"] + resources = ["arn:aws:s3:::*${local.domain_name}/*"] + } +} + +# define AWS policy documents for operator +data "aws_iam_policy_document" "operator_access" { + # IAM + statement { + effect = "Allow" + actions = [ + "iam:ListRoles", + "sts:AssumeRole" + ] + resources = ["arn:aws:iam::${local.account_id}:role/${local.project}-kubernetes-operator-prod"] + } + + # EKS + statement { + effect = "Allow" + actions = ["eks:*"] + resources = ["arn:aws:eks:${local.region}:${local.account_id}:cluster/${local.project}-prod*"] + } + + # ECR + statement { + effect = "Allow" + actions = ["ecr:*"] + resources = ["*"] + } + + # S3 + statement { + effect = "Allow" + actions = ["s3:*"] + resources = ["arn:aws:s3:::*${local.domain_name}"] + } + statement { + effect = "Allow" + actions = ["s3:*"] + resources = ["arn:aws:s3:::*${local.domain_name}/*"] + } +} + + + +locals { + # define Kubernetes policy for developer + k8s_developer_access = [ + { + verbs = ["exec"] + api_groups = [""] + resources = ["pods", "pods/exec", "pods/log", "pods/portforward"] + }, { + verbs = ["get", "list", "watch"] + api_groups = [""] + resources = ["deployments", "configmaps", "pods", "services", "endpoints"] + } + ] + + # define Kubernetes policy for 
operator + k8s_operator_access = [ + { + verbs = ["exec", "create", "list", "get", "delete", "patch", "update"] + api_groups = [""] + resources = ["deployments", "configmaps", "pods", "secrets", "services", "endpoints"] + } + ] +} diff --git a/templates/terraform/environments/shared/main.tf b/templates/terraform/environments/shared/main.tf new file mode 100644 index 0000000..4b8b6d5 --- /dev/null +++ b/templates/terraform/environments/shared/main.tf @@ -0,0 +1,63 @@ +terraform { + required_version = ">= 0.13" + backend "s3" { + bucket = "<% .Name %>-shared-terraform-state" + key = "infrastructure/terraform/environments/shared/main" + encrypt = true + region = "<% index .Params `region` %>" + dynamodb_table = "<% .Name %>-shared-terraform-state-locks" + } +} + +locals { + project = "<% .Name %>" + region = "<% index .Params `region` %>" + account_id = "<% index .Params `accountId` %>" +} + +provider "aws" { + region = local.region + allowed_account_ids = [local.account_id] +} + +# Instantiate the environment +locals { + # Users configuration + users = [ +# { +# name = "dev1" +# roles = [ +# { name = "developer", environments = ["stage", "prod"] } +# ] +# }, { +# name = "devops1" +# roles = [ +# { name = "developer", environments = ["stage", "prod"] }, +# { name = "operator", environments = ["stage"] } +# ] +# }, { +# name = "operator1" +# roles = [ +# { name = "operator", environments = ["stage", "prod"] } +# ] +# }, + ] +} + +## Create users +resource "aws_iam_user" "access_user" { + count = length(local.users) + name = "${local.project}-${local.users[count.index].name}" + + tags = { + for r in local.users[count.index].roles : "role:${r.name}" => join("/", r.environments) + } +} + +output "iam_users" { + value = aws_iam_user.access_user +} + +output "user_role_mapping" { + value = local.users +} diff --git a/templates/terraform/environments/stage/main.tf b/templates/terraform/environments/stage/main.tf index 06ee9d3..b2756da 100644 --- 
a/templates/terraform/environments/stage/main.tf +++ b/templates/terraform/environments/stage/main.tf @@ -9,9 +9,28 @@ terraform { } } +locals { + project = "<% .Name %>" + region = "<% index .Params `region` %>" + account_id = "<% index .Params `accountId` %>" + domain_name = "<% index .Params `stagingHostRoot` %>" +} + provider "aws" { - region = "<% index .Params `region` %>" - allowed_account_ids = ["<% index .Params `accountId` %>"] + region = local.region + allowed_account_ids = [local.account_id] +} + +# remote state of "shared" +data "terraform_remote_state" "shared" { + backend = "s3" + config = { + bucket = "${local.project}-shared-terraform-state" + key = "infrastructure/terraform/environments/shared/main" + region = local.region + encrypt = true + dynamodb_table = "${local.project}-shared-terraform-state-locks" + } } # Instantiate the staging environment @@ -20,13 +39,13 @@ module "stage" { environment = "stage" # Project configuration - project = "<% .Name %>" - region = "<% index .Params `region` %>" - allowed_account_ids = ["<% index .Params `accountId` %>"] + project = local.project + region = local.region + allowed_account_ids = [local.account_id] random_seed = "<% index .Params `randomSeed` %>" # ECR configuration - ecr_repositories = [ "<% .Name %>" ] + ecr_repositories = [ local.project ] # EKS configuration eks_cluster_version = "1.17" @@ -35,15 +54,15 @@ module "stage" { eks_worker_asg_max_size = 3 # EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html - # https://<% index .Params `region` %>.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.17%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=<% index .Params `region` %> + # 
https://${local.region}.console.aws.amazon.com/systems-manager/parameters/%252Faws%252Fservice%252Feks%252Foptimized-ami%252F1.17%252Famazon-linux-2%252Frecommended%252Fimage_id/description?region=${local.region} eks_worker_ami = "<% index .Params `eksWorkerAMI` %>" # Hosting configuration. Each domain will have a bucket created for it, but may have mulitple aliases pointing to the same bucket. hosted_domains = [ - { domain : "<% index .Params `stagingHostRoot` %>", aliases : [] }, - { domain : "<% index .Params `stagingFrontendSubdomain` %><% index .Params `stagingHostRoot` %>", aliases : [] }, + { domain : local.domain_name, aliases : [] }, + { domain : "<% index .Params `stagingFrontendSubdomain` %>${local.domain_name}", aliases : [] }, ] - domain_name = "<% index .Params `stagingHostRoot` %>" + domain_name = local.domain_name cf_signed_downloads = <% if eq (index .Params `fileUploads`) "yes" %>true<% else %>false<% end %> # This will save some money as there a cost associated to each NAT gateway, but if the AZ with the gateway @@ -66,5 +85,21 @@ module "stage" { # See https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-limits.html sendgrid_enabled = <%if eq (index .Params `sendgridApiKey`) "" %>false<% else %>true<% end %> - sendgrid_api_key_secret_name = "<% .Name %>-sendgrid-<% index .Params `randomSeed` %>" + sendgrid_api_key_secret_name = "${local.project}-sendgrid-<% index .Params `randomSeed` %>" + + # Roles configuration + roles = [ + { + name = "developer" + aws_policy = data.aws_iam_policy_document.developer_access.json + k8s_policies = local.k8s_developer_access + }, + { + name = "operator" + aws_policy = data.aws_iam_policy_document.operator_access.json + k8s_policies = local.k8s_operator_access + } + ] + + user_role_mapping = data.terraform_remote_state.shared.outputs.user_role_mapping } diff --git a/templates/terraform/environments/stage/user_access.tf b/templates/terraform/environments/stage/user_access.tf new file mode 
100644 index 0000000..2f3e543 --- /dev/null +++ b/templates/terraform/environments/stage/user_access.tf @@ -0,0 +1,101 @@ +# define AWS policy documents for developer +data "aws_iam_policy_document" "developer_access" { + # EKS + statement { + effect = "Allow" + actions = ["eks:ListClusters"] + resources = ["*"] + } + statement { + effect = "Allow" + actions = ["eks:DescribeCluster"] + resources = ["arn:aws:eks:${local.region}:${local.account_id}:cluster/${local.project}-stage*"] + } + + # ECR + statement { + effect = "Allow" + actions = [ + "ecr:DescribeImages", + "ecr:DescribeRepositories" + ] + resources = ["*"] + } + + # S3 + statement { + effect = "Allow" + actions = ["s3:ListBucket"] + resources = ["arn:aws:s3:::*${local.domain_name}"] + } + statement { + effect = "Allow" + actions = ["s3:GetObject"] + resources = ["arn:aws:s3:::*${local.domain_name}/*"] + } +} + +# define AWS policy documents for operator +data "aws_iam_policy_document" "operator_access" { + # IAM + statement { + effect = "Allow" + actions = [ + "iam:ListRoles", + "sts:AssumeRole" + ] + resources = ["arn:aws:iam::${local.account_id}:role/${local.project}-kubernetes-operator-stage"] + } + + # EKS + statement { + effect = "Allow" + actions = ["eks:*"] + resources = ["arn:aws:eks:${local.region}:${local.account_id}:cluster/${local.project}-stage*"] + } + + # ECR + statement { + effect = "Allow" + actions = ["ecr:*"] + resources = ["*"] + } + + # S3 + statement { + effect = "Allow" + actions = ["s3:*"] + resources = ["arn:aws:s3:::*${local.domain_name}"] + } + statement { + effect = "Allow" + actions = ["s3:*"] + resources = ["arn:aws:s3:::*${local.domain_name}/*"] + } +} + + + +locals { + # define Kubernetes policy for developer + k8s_developer_access = [ + { + verbs = ["exec"] + api_groups = [""] + resources = ["pods", "pods/exec", "pods/log", "pods/portforward"] + }, { + verbs = ["get", "list", "watch"] + api_groups = [""] + resources = ["deployments", "configmaps", "pods", "services", 
"endpoints"] + } + ] + + # define Kubernetes policy for operator + k8s_operator_access = [ + { + verbs = ["exec", "create", "list", "get", "delete", "patch", "update"] + api_groups = [""] + resources = ["deployments", "configmaps", "pods", "secrets", "services", "endpoints"] + } + ] +} diff --git a/templates/terraform/modules/environment/main.tf b/templates/terraform/modules/environment/main.tf index f4aa610..3c6f268 100644 --- a/templates/terraform/modules/environment/main.tf +++ b/templates/terraform/modules/environment/main.tf @@ -8,6 +8,19 @@ data "aws_iam_user" "ci_user" { user_name = "${var.project}-ci-user" # Should have been created in the bootstrap process } +locals { + role_name_list = var.roles.*.name + users = [ + for u in var.user_role_mapping : { + name = u.name + roles = [ + for r in u.roles : + r.name if contains(local.role_name_list, r.name) && contains(r.environments, var.environment) + ] + } + ] +} + module "vpc" { source = "commitdev/zero/aws//modules/vpc" @@ -28,7 +41,7 @@ data "aws_caller_identity" "current" {} # Provision the EKS cluster module "eks" { source = "commitdev/zero/aws//modules/eks" - version = "0.0.2" + version = "0.1.2" providers = { aws = aws.for_eks } @@ -47,6 +60,8 @@ module "eks" { worker_asg_min_size = var.eks_worker_asg_min_size worker_asg_max_size = var.eks_worker_asg_max_size worker_ami = var.eks_worker_ami # EKS-Optimized AMI for your region: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + + iam_role_mapping = module.user_access.eks_iam_role_mapping } @@ -88,7 +103,7 @@ module "s3_hosting" { module "db" { source = "commitdev/zero/aws//modules/database" - version = "0.0.1" + version = "0.1.1" project = var.project environment = var.environment @@ -111,7 +126,7 @@ module "ecr" { module "logging" { source = "commitdev/zero/aws//modules/logging" - version = "0.0.1" + version = "0.1.0" count = var.logging_type == "kibana" ? 
1 : 0 @@ -135,3 +150,14 @@ module "sendgrid" { zone_name = var.domain_name sendgrid_api_key_secret_name = var.sendgrid_api_key_secret_name } + +module "user_access" { + source = "commitdev/zero/aws//modules/user_access" + version = "0.1.2" + + project = var.project + environment = var.environment + + roles = var.roles + users = local.users +} diff --git a/templates/terraform/modules/environment/variables.tf b/templates/terraform/modules/environment/variables.tf index 246ff8c..bf465a5 100644 --- a/templates/terraform/modules/environment/variables.tf +++ b/templates/terraform/modules/environment/variables.tf @@ -146,3 +146,23 @@ variable "cf_signed_downloads" { description = "Enable Cloudfront signed URLs" default = false } + +variable "roles" { + type = list(object({ + name = string + aws_policy = string + k8s_policies = list(map(list(string))) + })) + description = "Role list with policies" +} + +variable "user_role_mapping" { + type = list(object({ + name = string + roles = list(object({ + name = string + environments = list(string) + })) + })) + description = "User-Roles mapping with environment" +}