# Hearth is the infrastructure home for the letemcook ecosystem.
# Ported from coherence-mcp/infra:
#   - Terraform modules (VPC, EKS, IAM, NLB, S3, storage)
#   - Kubernetes manifests (Forgejo, ingress, cert-manager, karpenter)
#   - Deployment scripts (phased rollout)
# Status: not deployed; the EKS cluster still needs to be provisioned.
# Next steps:
#   1. Bootstrap the Terraform backend
#   2. Deploy phase 1 (foundation)
#   3. Deploy phase 2 (core services, including Forgejo)
# VPC Module - Multi-AZ Networking
# RFC 0039: ADR-Compliant Foundation Infrastructure
#
# Architecture:
# - 3 AZs for high availability
# - Public subnets for NLB and NAT Gateways
# - Private subnets for EKS nodes and workloads
# - Database subnets for CockroachDB (isolated)
# - NAT Gateway per AZ for HA outbound traffic
locals {
  # Slice the /16 VPC into /20 subnets (4 extra bits => 4096 addresses each).
  # Each AZ consumes three consecutive /20s: public, private, database.
  #   AZ index 0: 10.0.0.0/20  | 10.0.16.0/20  | 10.0.32.0/20
  #   AZ index 1: 10.0.48.0/20 | 10.0.64.0/20  | 10.0.80.0/20
  #   AZ index 2: 10.0.96.0/20 | 10.0.112.0/20 | 10.0.128.0/20
  public_subnets   = [for idx in range(length(var.availability_zones)) : cidrsubnet(var.cidr, 4, idx * 3)]
  private_subnets  = [for idx in range(length(var.availability_zones)) : cidrsubnet(var.cidr, 4, idx * 3 + 1)]
  database_subnets = [for idx in range(length(var.availability_zones)) : cidrsubnet(var.cidr, 4, idx * 3 + 2)]
}
# VPC
resource "aws_vpc" "main" {
  cidr_block = var.cidr

  # DNS support and hostnames are both required for EKS and for interface
  # endpoints that use private DNS.
  enable_dns_support   = true
  enable_dns_hostnames = true

  tags = merge(var.tags, { Name = var.name })
}
# Internet Gateway — the internet path for the public subnets.
resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = merge(var.tags, { Name = "${var.name}-igw" })
}
# Public Subnets — one per AZ; host the NLB and the NAT Gateways.
resource "aws_subnet" "public" {
  count = length(var.availability_zones)

  vpc_id                  = aws_vpc.main.id
  availability_zone       = var.availability_zones[count.index]
  cidr_block              = local.public_subnets[count.index]
  map_public_ip_on_launch = true

  tags = merge(var.tags, {
    Name = "${var.name}-public-${var.availability_zones[count.index]}"
    # Kubernetes subnet discovery for internet-facing load balancers.
    "kubernetes.io/role/elb"            = "1"
    "kubernetes.io/cluster/${var.name}" = "shared"
  })
}
# Private Subnets — one per AZ; EKS nodes and workloads live here and
# egress only through the NAT Gateways.
resource "aws_subnet" "private" {
  count = length(var.availability_zones)

  vpc_id            = aws_vpc.main.id
  availability_zone = var.availability_zones[count.index]
  cidr_block        = local.private_subnets[count.index]

  tags = merge(var.tags, {
    Name = "${var.name}-private-${var.availability_zones[count.index]}"
    # Kubernetes subnet discovery for internal load balancers.
    "kubernetes.io/role/internal-elb"   = "1"
    "kubernetes.io/cluster/${var.name}" = "shared"
    # NOTE(review): Karpenter's subnetSelectorTerms must match this exact
    # tag value ("true"); some setups tag with the cluster name instead —
    # confirm against the EC2NodeClass manifests before changing.
    "karpenter.sh/discovery" = "true"
  })
}
# Database Subnets — isolated tier for CockroachDB, one per AZ. Their
# route table carries no default route, so there is no internet path.
resource "aws_subnet" "database" {
  count = length(var.availability_zones)

  vpc_id            = aws_vpc.main.id
  availability_zone = var.availability_zones[count.index]
  cidr_block        = local.database_subnets[count.index]

  tags = merge(var.tags, {
    Name = "${var.name}-database-${var.availability_zones[count.index]}"
  })
}
# Elastic IPs for the NAT Gateways — one per AZ, collapsed to a single
# address when var.single_nat_gateway is set, and none when NAT is disabled.
resource "aws_eip" "nat" {
  count = var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : length(var.availability_zones)) : 0

  domain = "vpc"

  tags = merge(var.tags, { Name = "${var.name}-nat-${count.index + 1}" })

  # A VPC-scoped EIP can only be used once the IGW is attached.
  depends_on = [aws_internet_gateway.main]
}
# NAT Gateways — one per AZ for HA outbound traffic, or a single gateway
# in the first public subnet when var.single_nat_gateway is set.
resource "aws_nat_gateway" "main" {
  count = var.enable_nat_gateway ? (var.single_nat_gateway ? 1 : length(var.availability_zones)) : 0

  subnet_id     = aws_subnet.public[count.index].id
  allocation_id = aws_eip.nat[count.index].id

  tags = merge(var.tags, {
    Name = "${var.name}-nat-${var.availability_zones[count.index]}"
  })

  # A NAT Gateway cannot pass traffic until the IGW exists.
  depends_on = [aws_internet_gateway.main]
}
# Public Route Table — a single table shared by every public subnet.
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  tags = merge(var.tags, { Name = "${var.name}-public" })
}
# Default route for the public subnets: all non-local traffic via the IGW.
resource "aws_route" "public_internet" {
  route_table_id         = aws_route_table.public.id
  gateway_id             = aws_internet_gateway.main.id
  destination_cidr_block = "0.0.0.0/0"
}
# Attach every public subnet to the shared public route table.
resource "aws_route_table_association" "public" {
  count = length(var.availability_zones)

  subnet_id      = aws_subnet.public[count.index].id
  route_table_id = aws_route_table.public.id
}
# Private Route Tables — one per AZ so each AZ can egress through its own
# NAT Gateway (keeps traffic zonal and survives a single-AZ outage).
resource "aws_route_table" "private" {
  count = length(var.availability_zones)

  vpc_id = aws_vpc.main.id

  tags = merge(var.tags, {
    Name = "${var.name}-private-${var.availability_zones[count.index]}"
  })
}
# Default route for the private subnets via NAT. With a single NAT Gateway
# every AZ's table points at gateway 0; otherwise each AZ uses its own.
resource "aws_route" "private_nat" {
  count = var.enable_nat_gateway ? length(var.availability_zones) : 0

  route_table_id         = aws_route_table.private[count.index].id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.main[var.single_nat_gateway ? 0 : count.index].id
}
# Attach each private subnet to its AZ-local private route table.
resource "aws_route_table_association" "private" {
  count = length(var.availability_zones)

  subnet_id      = aws_subnet.private[count.index].id
  route_table_id = aws_route_table.private[count.index].id
}
# Database Route Table — deliberately carries only the implicit local
# route, so the database tier has no internet access in either direction.
resource "aws_route_table" "database" {
  vpc_id = aws_vpc.main.id

  tags = merge(var.tags, { Name = "${var.name}-database" })
}
# Attach every database subnet to the shared (isolated) database table.
resource "aws_route_table_association" "database" {
  count = length(var.availability_zones)

  subnet_id      = aws_subnet.database[count.index].id
  route_table_id = aws_route_table.database.id
}
# VPC Flow Logs — record metadata for ALL traffic to CloudWatch for
# security auditing; created only when var.enable_flow_logs is set.
resource "aws_flow_log" "main" {
  count = var.enable_flow_logs ? 1 : 0

  vpc_id       = aws_vpc.main.id
  traffic_type = "ALL"

  log_destination_type = "cloud-watch-logs"
  log_destination      = aws_cloudwatch_log_group.flow_logs[0].arn
  iam_role_arn         = aws_iam_role.flow_logs[0].arn

  # 60s is the smallest aggregation window AWS supports — fastest visibility.
  max_aggregation_interval = 60

  tags = merge(var.tags, { Name = "${var.name}-flow-logs" })
}
# CloudWatch destination for the flow logs; 30-day retention caps cost.
resource "aws_cloudwatch_log_group" "flow_logs" {
  count = var.enable_flow_logs ? 1 : 0

  name              = "/aws/vpc/${var.name}/flow-logs"
  retention_in_days = 30

  tags = var.tags
}
# IAM role the VPC Flow Logs service assumes in order to deliver logs.
resource "aws_iam_role" "flow_logs" {
  count = var.enable_flow_logs ? 1 : 0

  name = "${var.name}-flow-logs"

  # Trust policy: only the flow-logs service principal may assume this role.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRole"
      Principal = { Service = "vpc-flow-logs.amazonaws.com" }
    }]
  })

  tags = var.tags
}
# Inline policy letting the flow-logs role write into its CloudWatch group.
# Scoped to the flow-logs log group and its streams instead of Resource "*",
# so a compromised role cannot write to or enumerate unrelated log groups.
resource "aws_iam_role_policy" "flow_logs" {
  count = var.enable_flow_logs ? 1 : 0

  name = "${var.name}-flow-logs"
  role = aws_iam_role.flow_logs[0].id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Action = [
          "logs:CreateLogGroup",
          "logs:CreateLogStream",
          "logs:PutLogEvents",
          "logs:DescribeLogGroups",
          "logs:DescribeLogStreams"
        ]
        # Log group ARN plus ":*" to cover the log streams within it.
        Resource = [
          aws_cloudwatch_log_group.flow_logs[0].arn,
          "${aws_cloudwatch_log_group.flow_logs[0].arn}:*"
        ]
      }
    ]
  })
}
# VPC Endpoints for AWS services (cost optimization - no NAT charges)

# S3 Gateway Endpoint — free, and keeps S3 traffic off the NAT Gateways.
# Registered in every route table so all three tiers can reach S3.
resource "aws_vpc_endpoint" "s3" {
  vpc_id            = aws_vpc.main.id
  service_name      = "com.amazonaws.${data.aws_region.current.name}.s3"
  vpc_endpoint_type = "Gateway"

  # route_table_ids is a set — grouping/order is not significant.
  route_table_ids = concat(
    [aws_route_table.public.id, aws_route_table.database.id],
    aws_route_table.private[*].id,
  )

  tags = merge(var.tags, { Name = "${var.name}-s3-endpoint" })
}
# ECR API Interface Endpoint — lets nodes in the private subnets talk to
# the ECR control plane without traversing NAT.
resource "aws_vpc_endpoint" "ecr_api" {
  vpc_id              = aws_vpc.main.id
  service_name        = "com.amazonaws.${data.aws_region.current.name}.ecr.api"
  vpc_endpoint_type   = "Interface"
  private_dns_enabled = true

  subnet_ids         = aws_subnet.private[*].id
  security_group_ids = [aws_security_group.vpc_endpoints.id]

  tags = merge(var.tags, { Name = "${var.name}-ecr-api-endpoint" })
}
# ECR Docker Interface Endpoint — serves the Docker registry API
# (image pulls) from the private subnets without NAT.
resource "aws_vpc_endpoint" "ecr_dkr" {
  vpc_id              = aws_vpc.main.id
  service_name        = "com.amazonaws.${data.aws_region.current.name}.ecr.dkr"
  vpc_endpoint_type   = "Interface"
  private_dns_enabled = true

  subnet_ids         = aws_subnet.private[*].id
  security_group_ids = [aws_security_group.vpc_endpoints.id]

  tags = merge(var.tags, { Name = "${var.name}-ecr-dkr-endpoint" })
}
# STS Interface Endpoint — keeps STS calls from the private subnets
# (e.g. credential exchange) inside the VPC.
resource "aws_vpc_endpoint" "sts" {
  vpc_id              = aws_vpc.main.id
  service_name        = "com.amazonaws.${data.aws_region.current.name}.sts"
  vpc_endpoint_type   = "Interface"
  private_dns_enabled = true

  subnet_ids         = aws_subnet.private[*].id
  security_group_ids = [aws_security_group.vpc_endpoints.id]

  tags = merge(var.tags, { Name = "${var.name}-sts-endpoint" })
}
# EC2 API Interface Endpoint — EC2 control-plane calls from the private
# subnets stay inside the VPC.
resource "aws_vpc_endpoint" "ec2" {
  vpc_id              = aws_vpc.main.id
  service_name        = "com.amazonaws.${data.aws_region.current.name}.ec2"
  vpc_endpoint_type   = "Interface"
  private_dns_enabled = true

  subnet_ids         = aws_subnet.private[*].id
  security_group_ids = [aws_security_group.vpc_endpoints.id]

  tags = merge(var.tags, { Name = "${var.name}-ec2-endpoint" })
}
# Security Group for the Interface endpoints: HTTPS in from anywhere
# inside the VPC, unrestricted egress.
resource "aws_security_group" "vpc_endpoints" {
  name        = "${var.name}-vpc-endpoints"
  description = "Security group for VPC endpoints"
  vpc_id      = aws_vpc.main.id

  ingress {
    description = "HTTPS from VPC"
    protocol    = "tcp"
    from_port   = 443
    to_port     = 443
    cidr_blocks = [var.cidr]
  }

  egress {
    description = "All outbound"
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(var.tags, { Name = "${var.name}-vpc-endpoints" })
}
# Region of the configured AWS provider — used to build the
# "com.amazonaws.<region>.<service>" VPC endpoint service names above.
# NOTE(review): the `.name` attribute is deprecated in AWS provider v6+
# (replaced by `.region`) — confirm the provider version pinned for this
# module before switching.
data "aws_region" "current" {}
|