# WiFi-DensePose AWS Infrastructure
#
# This Terraform configuration provisions the AWS infrastructure for
# WiFi-DensePose: VPC networking, an EKS cluster, RDS PostgreSQL,
# ElastiCache Redis, S3 buckets, KMS keys, and an Application Load Balancer.
terraform {
  # Require a modern Terraform CLI; 1.x guarantees stable state format.
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.20"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.10"
    }
    random = {
      source  = "hashicorp/random"
      version = "~> 3.1"
    }
  }

  # Remote state in S3 with server-side encryption and DynamoDB locking.
  backend "s3" {
    bucket         = "wifi-densepose-terraform-state"
    key            = "infrastructure/terraform.tfstate"
    region         = "us-west-2"
    encrypt        = true
    dynamodb_table = "wifi-densepose-terraform-locks"
  }
}
# AWS provider: region comes from configuration, and every resource that
# supports tagging inherits the project-wide default tags below.
provider "aws" {
  region = var.aws_region

  default_tags {
    tags = {
      Project     = "WiFi-DensePose"
      Environment = var.environment
      ManagedBy   = "Terraform"
      Owner       = var.owner
    }
  }
}
# Lookups used throughout: the AZs usable in this region, and the identity
# (account id) of the credentials running this plan.
data "aws_availability_zones" "available" {
  state = "available"
}

data "aws_caller_identity" "current" {}
# Master password for the RDS instance.
# RDS rejects master passwords containing '/', '@', '"' or spaces, and the
# random_password provider's default special-character set includes '@' and
# '/', so restrict the set explicitly to characters RDS accepts.
resource "random_password" "db_password" {
  length           = 32
  special          = true
  override_special = "!#$%&*()-_=+[]{}<>:?"
}
# Core VPC with DNS support enabled (required for EKS and for RDS/Redis
# endpoint hostname resolution), plus its internet gateway.
resource "aws_vpc" "main" {
  cidr_block           = var.vpc_cidr
  enable_dns_support   = true
  enable_dns_hostnames = true

  tags = {
    Name = "${var.project_name}-vpc"
  }
}

resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "${var.project_name}-igw"
  }
}
# One public and one private subnet per configured CIDR, spread across the
# region's availability zones in order.
# NOTE(review): assumes len(public_subnet_cidrs) <= number of available AZs;
# verify against variable defaults.
resource "aws_subnet" "public" {
  count = length(var.public_subnet_cidrs)

  vpc_id                  = aws_vpc.main.id
  cidr_block              = var.public_subnet_cidrs[count.index]
  availability_zone       = data.aws_availability_zones.available.names[count.index]
  map_public_ip_on_launch = true

  tags = {
    Name = "${var.project_name}-public-subnet-${count.index + 1}"
    Type = "Public"
  }
}

resource "aws_subnet" "private" {
  count = length(var.private_subnet_cidrs)

  vpc_id            = aws_vpc.main.id
  cidr_block        = var.private_subnet_cidrs[count.index]
  availability_zone = data.aws_availability_zones.available.names[count.index]

  tags = {
    Name = "${var.project_name}-private-subnet-${count.index + 1}"
    Type = "Private"
  }
}
# One NAT gateway (with its own Elastic IP) per public subnet, giving each
# AZ an independent egress path for private subnets.
resource "aws_eip" "nat" {
  count = length(aws_subnet.public)

  domain = "vpc"

  tags = {
    Name = "${var.project_name}-nat-eip-${count.index + 1}"
  }

  # EIPs in a VPC require the IGW to exist first.
  depends_on = [aws_internet_gateway.main]
}

resource "aws_nat_gateway" "main" {
  count = length(aws_subnet.public)

  allocation_id = aws_eip.nat[count.index].id
  subnet_id     = aws_subnet.public[count.index].id

  tags = {
    Name = "${var.project_name}-nat-gateway-${count.index + 1}"
  }

  depends_on = [aws_internet_gateway.main]
}
# Routing: a single public route table sending 0.0.0.0/0 to the IGW, and one
# private route table per NAT gateway sending 0.0.0.0/0 through it.
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.main.id
  }

  tags = {
    Name = "${var.project_name}-public-rt"
  }
}

resource "aws_route_table" "private" {
  count = length(aws_nat_gateway.main)

  vpc_id = aws_vpc.main.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.main[count.index].id
  }

  tags = {
    Name = "${var.project_name}-private-rt-${count.index + 1}"
  }
}
# Route table associations.
resource "aws_route_table_association" "public" {
  count = length(aws_subnet.public)

  subnet_id      = aws_subnet.public[count.index].id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "private" {
  count = length(aws_subnet.private)

  subnet_id = aws_subnet.private[count.index].id
  # Private route tables are created per NAT gateway (i.e. per PUBLIC
  # subnet), so direct indexing fails when there are more private subnets
  # than public ones. element() wraps around, distributing extra private
  # subnets across the available route tables instead of erroring.
  route_table_id = element(aws_route_table.private[*].id, count.index)
}
# Security groups.
# EKS control plane: no explicit ingress here — EKS manages the rules it
# needs on its managed interfaces; allow all egress.
resource "aws_security_group" "eks_cluster" {
  name_prefix = "${var.project_name}-eks-cluster"
  vpc_id      = aws_vpc.main.id

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "${var.project_name}-eks-cluster-sg"
  }
}

# Worker nodes: unrestricted TCP between nodes in this group (pod-to-pod
# traffic), all egress.
resource "aws_security_group" "eks_nodes" {
  name_prefix = "${var.project_name}-eks-nodes"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port = 0
    to_port   = 65535
    protocol  = "tcp"
    self      = true
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "${var.project_name}-eks-nodes-sg"
  }
}

# RDS: PostgreSQL (5432) reachable only from worker nodes.
resource "aws_security_group" "rds" {
  name_prefix = "${var.project_name}-rds"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port       = 5432
    to_port         = 5432
    protocol        = "tcp"
    security_groups = [aws_security_group.eks_nodes.id]
  }

  tags = {
    Name = "${var.project_name}-rds-sg"
  }
}
# EKS control plane: private + public API endpoints, secrets envelope-encrypted
# with a dedicated KMS key, and all five control-plane log types shipped to
# CloudWatch Logs (delivered to /aws/eks/<cluster>/cluster).
resource "aws_eks_cluster" "main" {
  name     = "${var.project_name}-cluster"
  version  = var.kubernetes_version
  role_arn = aws_iam_role.eks_cluster.arn

  vpc_config {
    subnet_ids              = concat(aws_subnet.public[*].id, aws_subnet.private[*].id)
    security_group_ids      = [aws_security_group.eks_cluster.id]
    endpoint_private_access = true
    endpoint_public_access  = true
  }

  encryption_config {
    resources = ["secrets"]
    provider {
      key_arn = aws_kms_key.eks.arn
    }
  }

  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]

  # IAM permissions must exist before the control plane is created.
  depends_on = [
    aws_iam_role_policy_attachment.eks_cluster_policy,
    aws_iam_role_policy_attachment.eks_vpc_resource_controller,
  ]

  tags = {
    Name = "${var.project_name}-eks-cluster"
  }
}
# Managed node group on private subnets, on-demand capacity, with rolling
# updates limited to one unavailable node at a time.
resource "aws_eks_node_group" "main" {
  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "${var.project_name}-nodes"
  node_role_arn   = aws_iam_role.eks_nodes.arn
  subnet_ids      = aws_subnet.private[*].id

  capacity_type  = "ON_DEMAND"
  instance_types = var.node_instance_types

  scaling_config {
    min_size     = var.node_min_size
    desired_size = var.node_desired_size
    max_size     = var.node_max_size
  }

  update_config {
    max_unavailable = 1
  }

  # SSH access restricted to the node security group.
  # NOTE(review): assumes var.key_pair_name names an existing EC2 key pair;
  # verify, since an invalid name fails at apply time.
  remote_access {
    ec2_ssh_key               = var.key_pair_name
    source_security_group_ids = [aws_security_group.eks_nodes.id]
  }

  depends_on = [
    aws_iam_role_policy_attachment.eks_worker_node_policy,
    aws_iam_role_policy_attachment.eks_cni_policy,
    aws_iam_role_policy_attachment.eks_container_registry_policy,
  ]

  tags = {
    Name = "${var.project_name}-eks-nodes"
  }
}
# IAM roles for EKS.
# Control-plane role, assumable only by the EKS service.
resource "aws_iam_role" "eks_cluster" {
  name = "${var.project_name}-eks-cluster-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRole"
      Principal = { Service = "eks.amazonaws.com" }
    }]
  })
}

resource "aws_iam_role_policy_attachment" "eks_cluster_policy" {
  role       = aws_iam_role.eks_cluster.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
}

resource "aws_iam_role_policy_attachment" "eks_vpc_resource_controller" {
  role       = aws_iam_role.eks_cluster.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
}

# Worker-node role, assumable by EC2 instances in the node group.
resource "aws_iam_role" "eks_nodes" {
  name = "${var.project_name}-eks-nodes-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRole"
      Principal = { Service = "ec2.amazonaws.com" }
    }]
  })
}

resource "aws_iam_role_policy_attachment" "eks_worker_node_policy" {
  role       = aws_iam_role.eks_nodes.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
}

resource "aws_iam_role_policy_attachment" "eks_cni_policy" {
  role       = aws_iam_role.eks_nodes.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
}

resource "aws_iam_role_policy_attachment" "eks_container_registry_policy" {
  role       = aws_iam_role.eks_nodes.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
}
# Customer-managed KMS key (with yearly rotation) used for EKS secrets
# envelope encryption, plus a friendly alias.
resource "aws_kms_key" "eks" {
  description             = "EKS Secret Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = {
    Name = "${var.project_name}-eks-encryption-key"
  }
}

resource "aws_kms_alias" "eks" {
  name          = "alias/${var.project_name}-eks"
  target_key_id = aws_kms_key.eks.key_id
}
# Subnet group keeping the database on private subnets only.
resource "aws_db_subnet_group" "main" {
  name       = "${var.project_name}-db-subnet-group"
  subnet_ids = aws_subnet.private[*].id

  tags = {
    Name = "${var.project_name}-db-subnet-group"
  }
}
# PostgreSQL instance: encrypted gp3 storage with autoscaling headroom,
# enhanced monitoring (60s) and Performance Insights, daily backups, and a
# final snapshot taken on destroy.
resource "aws_db_instance" "main" {
  identifier = "${var.project_name}-database"

  engine         = "postgres"
  engine_version = var.postgres_version
  instance_class = var.db_instance_class

  allocated_storage     = var.db_allocated_storage
  max_allocated_storage = var.db_max_allocated_storage
  storage_type          = "gp3"
  storage_encrypted     = true
  kms_key_id            = aws_kms_key.rds.arn

  db_name  = var.db_name
  username = var.db_username
  password = random_password.db_password.result

  vpc_security_group_ids = [aws_security_group.rds.id]
  db_subnet_group_name   = aws_db_subnet_group.main.name

  backup_retention_period = var.db_backup_retention_period
  backup_window           = "03:00-04:00"
  maintenance_window      = "sun:04:00-sun:05:00"

  skip_final_snapshot = false
  # Use a stable identifier: deriving it from timestamp() makes the value
  # change on every run, producing a perpetual diff in `terraform plan`.
  # If the snapshot name collides on a re-create, delete or rename the old
  # snapshot first.
  final_snapshot_identifier = "${var.project_name}-final-snapshot"

  performance_insights_enabled = true
  monitoring_interval          = 60
  monitoring_role_arn          = aws_iam_role.rds_monitoring.arn

  tags = {
    Name = "${var.project_name}-database"
  }
}
# Customer-managed KMS key for RDS storage encryption, plus alias.
resource "aws_kms_key" "rds" {
  description             = "RDS Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = {
    Name = "${var.project_name}-rds-encryption-key"
  }
}

resource "aws_kms_alias" "rds" {
  name          = "alias/${var.project_name}-rds"
  target_key_id = aws_kms_key.rds.key_id
}
# Role the RDS enhanced-monitoring agent assumes to publish OS metrics.
resource "aws_iam_role" "rds_monitoring" {
  name = "${var.project_name}-rds-monitoring-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRole"
      Principal = { Service = "monitoring.rds.amazonaws.com" }
    }]
  })
}

resource "aws_iam_role_policy_attachment" "rds_monitoring" {
  role       = aws_iam_role.rds_monitoring.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"
}
# Subnet group keeping Redis on private subnets only.
resource "aws_elasticache_subnet_group" "main" {
  name       = "${var.project_name}-cache-subnet-group"
  subnet_ids = aws_subnet.private[*].id

  tags = {
    Name = "${var.project_name}-cache-subnet-group"
  }
}
# Redis replication group: encryption at rest and in transit (AUTH token
# required), automatic failover / multi-AZ whenever more than one node is
# configured, and daily snapshots kept for five days.
resource "aws_elasticache_replication_group" "main" {
  replication_group_id = "${var.project_name}-redis"
  description          = "Redis cluster for WiFi-DensePose"

  node_type            = var.redis_node_type
  port                 = 6379
  parameter_group_name = "default.redis7"

  num_cache_clusters = var.redis_num_cache_nodes
  # Failover/multi-AZ only make sense with at least one replica.
  automatic_failover_enabled = var.redis_num_cache_nodes > 1
  multi_az_enabled           = var.redis_num_cache_nodes > 1

  subnet_group_name  = aws_elasticache_subnet_group.main.name
  security_group_ids = [aws_security_group.redis.id]

  at_rest_encryption_enabled = true
  transit_encryption_enabled = true
  auth_token                 = random_password.redis_auth_token.result

  snapshot_retention_limit = 5
  snapshot_window          = "03:00-05:00"

  tags = {
    Name = "${var.project_name}-redis"
  }
}
# Redis (6379) reachable only from EKS worker nodes.
resource "aws_security_group" "redis" {
  name_prefix = "${var.project_name}-redis"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port       = 6379
    to_port         = 6379
    protocol        = "tcp"
    security_groups = [aws_security_group.eks_nodes.id]
  }

  tags = {
    Name = "${var.project_name}-redis-sg"
  }
}

# AUTH token for Redis; alphanumeric only, since ElastiCache restricts the
# characters allowed in auth tokens.
resource "random_password" "redis_auth_token" {
  length  = 32
  special = false
}
# Application data bucket. A random hex suffix keeps the globally-unique
# bucket name from colliding across accounts/environments.
resource "random_id" "bucket_suffix" {
  byte_length = 4
}

resource "aws_s3_bucket" "app_data" {
  bucket = "${var.project_name}-app-data-${random_id.bucket_suffix.hex}"

  tags = {
    Name = "${var.project_name}-app-data"
  }
}

resource "aws_s3_bucket_versioning" "app_data" {
  bucket = aws_s3_bucket.app_data.id

  versioning_configuration {
    status = "Enabled"
  }
}
# Default SSE-KMS encryption for the application data bucket.
# Fix: "aws_s3_bucket_encryption" is not a valid resource type in the AWS
# provider — the correct resource is
# aws_s3_bucket_server_side_encryption_configuration, and its "rule" block
# sits at the top level (no server_side_encryption_configuration wrapper).
resource "aws_s3_bucket_server_side_encryption_configuration" "app_data" {
  bucket = aws_s3_bucket.app_data.id

  rule {
    apply_server_side_encryption_by_default {
      kms_master_key_id = aws_kms_key.s3.arn
      sse_algorithm     = "aws:kms"
    }
    # S3 Bucket Keys cut KMS request costs for KMS-encrypted buckets.
    bucket_key_enabled = true
  }
}
# Block every form of public access to the application data bucket.
resource "aws_s3_bucket_public_access_block" "app_data" {
  bucket = aws_s3_bucket.app_data.id

  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}
# Customer-managed KMS key for S3 object encryption, plus alias.
resource "aws_kms_key" "s3" {
  description             = "S3 Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = {
    Name = "${var.project_name}-s3-encryption-key"
  }
}

resource "aws_kms_alias" "s3" {
  name          = "alias/${var.project_name}-s3"
  target_key_id = aws_kms_key.s3.key_id
}
# CloudWatch log group for EKS control-plane logs.
# Fix: the name is derived from var.project_name (matching the cluster name
# "${var.project_name}-cluster") instead of aws_eks_cluster.main.name.
# Referencing the cluster attribute forces the cluster to be created first,
# and EKS then creates the /aws/eks/<name>/cluster group itself WITHOUT the
# KMS key — after which this resource fails with ResourceAlreadyExists.
# Decoupling the name lets Terraform create the encrypted group up front.
resource "aws_cloudwatch_log_group" "eks_cluster" {
  name              = "/aws/eks/${var.project_name}-cluster/cluster"
  retention_in_days = var.log_retention_days
  kms_key_id        = aws_kms_key.cloudwatch.arn

  tags = {
    Name = "${var.project_name}-eks-logs"
  }
}
# KMS key for CloudWatch Logs encryption. CloudWatch Logs needs an explicit
# key policy granting the regional logs service principal use of the key;
# the grant is scoped with the aws:logs:arn encryption context so only log
# groups in this account/region can use it (per AWS CloudWatch Logs docs).
resource "aws_kms_key" "cloudwatch" {
  description             = "CloudWatch Logs Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid       = "Enable IAM User Permissions"
        Effect    = "Allow"
        Principal = { AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root" }
        Action    = "kms:*"
        Resource  = "*"
      },
      {
        Sid       = "Allow CloudWatch Logs"
        Effect    = "Allow"
        Principal = { Service = "logs.${var.aws_region}.amazonaws.com" }
        Action = [
          "kms:Encrypt",
          "kms:Decrypt",
          "kms:ReEncrypt*",
          "kms:GenerateDataKey*",
          "kms:DescribeKey"
        ]
        Resource = "*"
        # Restrict use of the key to log groups in this account and region.
        Condition = {
          ArnLike = {
            "kms:EncryptionContext:aws:logs:arn" = "arn:aws:logs:${var.aws_region}:${data.aws_caller_identity.current.account_id}:*"
          }
        }
      }
    ]
  })

  tags = {
    Name = "${var.project_name}-cloudwatch-encryption-key"
  }
}

resource "aws_kms_alias" "cloudwatch" {
  name          = "alias/${var.project_name}-cloudwatch"
  target_key_id = aws_kms_key.cloudwatch.key_id
}
# Internet-facing application load balancer on the public subnets, with
# access logs shipped to the dedicated S3 bucket. Deletion protection is
# enabled only for production.
# NOTE(review): ALB names are limited to 32 characters — verify that
# "${var.project_name}-alb" stays within that for all project names used.
resource "aws_lb" "main" {
  name               = "${var.project_name}-alb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = [aws_security_group.alb.id]
  subnets            = aws_subnet.public[*].id

  enable_deletion_protection = var.environment == "production"

  access_logs {
    enabled = true
    bucket  = aws_s3_bucket.alb_logs.bucket
    prefix  = "alb-logs"
  }

  tags = {
    Name = "${var.project_name}-alb"
  }
}
# ALB security group: HTTP/HTTPS in from anywhere, all egress.
resource "aws_security_group" "alb" {
  name_prefix = "${var.project_name}-alb"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "${var.project_name}-alb-sg"
  }
}
# Bucket receiving ALB access logs.
resource "aws_s3_bucket" "alb_logs" {
  bucket = "${var.project_name}-alb-logs-${random_id.bucket_suffix.hex}"

  tags = {
    Name = "${var.project_name}-alb-logs"
  }
}

# Default encryption for the log bucket. ALB log delivery supports SSE-S3
# only (not SSE-KMS), so use AES256 here.
resource "aws_s3_bucket_server_side_encryption_configuration" "alb_logs" {
  bucket = aws_s3_bucket.alb_logs.id

  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}

# A log bucket should never be publicly reachable; service-principal bucket
# policies (log delivery) are unaffected by these settings.
resource "aws_s3_bucket_public_access_block" "alb_logs" {
  bucket = aws_s3_bucket.alb_logs.id

  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}
# Bucket policy allowing ALB access-log delivery. Both mechanisms are
# granted: the regional ELB service account (older regions) and the
# delivery.logs.amazonaws.com service principal (newer regions), the latter
# constrained to bucket-owner-full-control uploads.
resource "aws_s3_bucket_policy" "alb_logs" {
  bucket = aws_s3_bucket.alb_logs.id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect    = "Allow"
        Principal = { AWS = "arn:aws:iam::${data.aws_elb_service_account.main.id}:root" }
        Action    = "s3:PutObject"
        Resource  = "${aws_s3_bucket.alb_logs.arn}/alb-logs/AWSLogs/${data.aws_caller_identity.current.account_id}/*"
      },
      {
        Effect    = "Allow"
        Principal = { Service = "delivery.logs.amazonaws.com" }
        Action    = "s3:PutObject"
        Resource  = "${aws_s3_bucket.alb_logs.arn}/alb-logs/AWSLogs/${data.aws_caller_identity.current.account_id}/*"
        Condition = {
          StringEquals = {
            "s3:x-amz-acl" = "bucket-owner-full-control"
          }
        }
      },
      {
        Effect    = "Allow"
        Principal = { Service = "delivery.logs.amazonaws.com" }
        Action    = "s3:GetBucketAcl"
        Resource  = aws_s3_bucket.alb_logs.arn
      }
    ]
  })
}

# Regional ELB service account used in the legacy log-delivery statement.
data "aws_elb_service_account" "main" {}
# Secrets Manager entry holding the application's connection strings and
# signing keys, encrypted with a dedicated KMS key. The 7-day recovery
# window allows an accidental deletion to be undone.
resource "aws_secretsmanager_secret" "app_secrets" {
  name                    = "${var.project_name}-app-secrets"
  description             = "Application secrets for WiFi-DensePose"
  recovery_window_in_days = 7
  kms_key_id              = aws_kms_key.secrets.arn

  tags = {
    Name = "${var.project_name}-app-secrets"
  }
}
# Secret payload: connection URLs and application signing keys.
resource "aws_secretsmanager_secret_version" "app_secrets" {
  secret_id = aws_secretsmanager_secret.app_secrets.id

  secret_string = jsonencode({
    # aws_db_instance.endpoint already includes the port ("host:port").
    database_url = "postgresql://${aws_db_instance.main.username}:${random_password.db_password.result}@${aws_db_instance.main.endpoint}/${aws_db_instance.main.db_name}"
    # The replication group sets transit_encryption_enabled = true, so
    # clients must connect over TLS — the scheme is "rediss", not "redis"
    # (a plain "redis://" URL would fail the TLS handshake).
    redis_url  = "rediss://:${random_password.redis_auth_token.result}@${aws_elasticache_replication_group.main.primary_endpoint_address}:6379"
    secret_key = random_password.app_secret_key.result
    jwt_secret = random_password.jwt_secret.result
  })
}
# Application-level signing secrets (64 chars, full character set).
resource "random_password" "app_secret_key" {
  length  = 64
  special = true
}

resource "random_password" "jwt_secret" {
  length  = 64
  special = true
}
# Customer-managed KMS key for Secrets Manager, plus alias.
resource "aws_kms_key" "secrets" {
  description             = "Secrets Manager Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = {
    Name = "${var.project_name}-secrets-encryption-key"
  }
}

resource "aws_kms_alias" "secrets" {
  name          = "alias/${var.project_name}-secrets"
  target_key_id = aws_kms_key.secrets.key_id
}