Advanced Integration
Modern AWS architectures combine multiple services in complex patterns—EKS clusters with RDS databases, Lambda functions triggered by S3 events, API Gateway integrations with multiple backends. Terraform excels at orchestrating these complex integrations, but you need to understand service dependencies, data flow patterns, and the operational considerations that make these architectures work reliably.
This final part demonstrates advanced integration patterns that bring together everything you’ve learned about AWS and Terraform.
EKS Cluster with Complete Ecosystem
A production-ready EKS cluster with all supporting services:
# EKS control plane: private networking, secrets envelope encryption with a
# customer-managed KMS key, and all control-plane log types in CloudWatch.
resource "aws_eks_cluster" "main" {
name = var.cluster_name
role_arn = aws_iam_role.eks_cluster.arn
version = var.kubernetes_version
# Both subnet tiers are passed so the control plane can place ENIs for
# private nodes and for public load balancers.
vpc_config {
subnet_ids = concat(var.private_subnet_ids, var.public_subnet_ids)
endpoint_private_access = true
endpoint_public_access = var.enable_public_access
public_access_cidrs = var.public_access_cidrs
security_group_ids = [aws_security_group.eks_cluster.id]
}
# Envelope-encrypt Kubernetes Secrets at rest with our own KMS key.
encryption_config {
provider {
key_arn = aws_kms_key.eks.arn
}
resources = ["secrets"]
}
# Ship every control-plane log type for auditing and troubleshooting.
enabled_cluster_log_types = [
"api", "audit", "authenticator", "controllerManager", "scheduler"
]
# IAM permissions and the log group must exist first; otherwise EKS creates
# its own log group with default (never-expire) retention.
depends_on = [
aws_iam_role_policy_attachment.eks_cluster_policy,
aws_iam_role_policy_attachment.eks_vpc_resource_controller,
aws_cloudwatch_log_group.eks_cluster
]
tags = var.tags
}
# EKS managed node groups with mixed instance types (one group per entry in
# var.node_groups, e.g. on-demand and spot pools).
resource "aws_eks_node_group" "main" {
  for_each = var.node_groups

  cluster_name    = aws_eks_cluster.main.name
  node_group_name = each.key
  node_role_arn   = aws_iam_role.eks_node_group.arn
  subnet_ids      = var.private_subnet_ids

  capacity_type  = each.value.capacity_type
  instance_types = each.value.instance_types
  ami_type       = each.value.ami_type
  # NOTE: disk_size is intentionally NOT set here. The EKS API rejects node
  # groups that specify both disk_size and a launch_template — size the root
  # volume via a block_device_mappings block on aws_launch_template.eks_nodes.

  scaling_config {
    desired_size = each.value.desired_size
    max_size     = each.value.max_size
    min_size     = each.value.min_size
  }

  # Roll at most a quarter of the nodes at a time during upgrades.
  update_config {
    max_unavailable_percentage = 25
  }

  # Launch template for advanced configuration (user data, instance tags).
  # Tracking latest_version makes the node group roll when the template changes.
  launch_template {
    id      = aws_launch_template.eks_nodes[each.key].id
    version = aws_launch_template.eks_nodes[each.key].latest_version
  }

  labels = merge(each.value.labels, {
    "node-group" = each.key
  })

  # Optional scheduling taints per node group (e.g. dedicate spot nodes).
  dynamic "taint" {
    for_each = each.value.taints
    content {
      key    = taint.value.key
      value  = taint.value.value
      effect = taint.value.effect
    }
  }

  # The "owned" cluster tag lets autoscaling/AWS integrations discover nodes.
  tags = merge(var.tags, {
    "kubernetes.io/cluster/${var.cluster_name}" = "owned"
  })

  # Node IAM permissions must exist before instances try to join the cluster.
  depends_on = [
    aws_iam_role_policy_attachment.eks_worker_node_policy,
    aws_iam_role_policy_attachment.eks_cni_policy,
    aws_iam_role_policy_attachment.eks_container_registry_policy
  ]
}
# Launch template for EKS nodes — carries security groups, bootstrap user
# data, and instance tags that the managed node group API can't express alone.
resource "aws_launch_template" "eks_nodes" {
for_each = var.node_groups
name_prefix = "${var.cluster_name}-${each.key}-"
vpc_security_group_ids = [aws_security_group.eks_nodes.id]
# Render the bootstrap script with the cluster connection details so nodes
# can join via the EKS bootstrap mechanism.
user_data = base64encode(templatefile("${path.module}/user_data.sh", {
cluster_name = var.cluster_name
cluster_endpoint = aws_eks_cluster.main.endpoint
cluster_ca = aws_eks_cluster.main.certificate_authority[0].data
bootstrap_arguments = each.value.bootstrap_arguments
}))
# Tag the EC2 instances themselves (node-group tags don't propagate here).
tag_specifications {
resource_type = "instance"
tags = merge(var.tags, {
Name = "${var.cluster_name}-${each.key}-node"
})
}
# name_prefix + create_before_destroy lets a replacement template exist
# alongside the old one while node groups roll over.
lifecycle {
create_before_destroy = true
}
}
# EKS managed add-ons (e.g. vpc-cni, coredns, kube-proxy, aws-ebs-csi-driver),
# keyed by add-on name in var.eks_addons.
resource "aws_eks_addon" "addons" {
  for_each = var.eks_addons

  cluster_name  = aws_eks_cluster.main.name
  addon_name    = each.key
  addon_version = each.value.version

  # `resolve_conflicts` was deprecated and removed in AWS provider v5; the
  # split create/update arguments preserve the original OVERWRITE behavior.
  resolve_conflicts_on_create = "OVERWRITE"
  resolve_conflicts_on_update = "OVERWRITE"

  # IRSA role for add-ons that call AWS APIs (e.g. the EBS CSI driver).
  service_account_role_arn = each.value.service_account_role_arn

  tags = var.tags
}
# OIDC identity provider for IAM Roles for Service Accounts (IRSA): lets
# Kubernetes service accounts assume IAM roles via web identity federation.
data "tls_certificate" "eks_oidc" {
url = aws_eks_cluster.main.identity[0].oidc[0].issuer
}
resource "aws_iam_openid_connect_provider" "eks_oidc" {
client_id_list = ["sts.amazonaws.com"]
# Pin trust to the issuer's current root CA thumbprint.
thumbprint_list = [data.tls_certificate.eks_oidc.certificates[0].sha1_fingerprint]
url = aws_eks_cluster.main.identity[0].oidc[0].issuer
tags = var.tags
}
# IRSA role for the AWS Load Balancer Controller. The trust policy restricts
# assumption to the kube-system/aws-load-balancer-controller service account
# of this cluster's OIDC issuer.
resource "aws_iam_role" "aws_load_balancer_controller" {
name = "${var.cluster_name}-aws-load-balancer-controller"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRoleWithWebIdentity"
Effect = "Allow"
Principal = {
Federated = aws_iam_openid_connect_provider.eks_oidc.arn
}
# Both sub and aud must match, so only this exact service account (with the
# STS audience) can assume the role.
Condition = {
StringEquals = {
"${replace(aws_iam_openid_connect_provider.eks_oidc.url, "https://", "")}:sub" = "system:serviceaccount:kube-system:aws-load-balancer-controller"
"${replace(aws_iam_openid_connect_provider.eks_oidc.url, "https://", "")}:aud" = "sts.amazonaws.com"
}
}
}
]
})
tags = var.tags
}
# NOTE(review): ElasticLoadBalancingFullAccess is broader than the scoped IAM
# policy the controller project publishes — consider tightening; verify
# against the controller's installation docs.
resource "aws_iam_role_policy_attachment" "aws_load_balancer_controller" {
policy_arn = "arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess"
role = aws_iam_role.aws_load_balancer_controller.name
}
Serverless Application with API Gateway
A complete serverless application with API Gateway, Lambda, and DynamoDB:
# REST API container — resources, methods, and stages hang off this.
# REGIONAL endpoint avoids CloudFront in front of the API (lower latency
# for in-region clients; pair with your own CDN if needed).
resource "aws_api_gateway_rest_api" "main" {
name = var.api_name
description = "Serverless API for ${var.application_name}"
endpoint_configuration {
types = ["REGIONAL"]
}
tags = var.tags
}
# URL path hierarchy: /users and /users/{id} ({id} is a path parameter).
resource "aws_api_gateway_resource" "users" {
rest_api_id = aws_api_gateway_rest_api.main.id
parent_id = aws_api_gateway_rest_api.main.root_resource_id
path_part = "users"
}
resource "aws_api_gateway_resource" "user_id" {
rest_api_id = aws_api_gateway_rest_api.main.id
parent_id = aws_api_gateway_resource.users.id
path_part = "{id}"
}
# One Lambda per API operation, keyed by operation name in
# var.lambda_functions (e.g. "list_users", "stream_processor").
resource "aws_lambda_function" "api_functions" {
for_each = var.lambda_functions
filename = each.value.filename
function_name = "${var.application_name}-${each.key}"
role = aws_iam_role.lambda_execution.arn
handler = each.value.handler
runtime = each.value.runtime
timeout = each.value.timeout
memory_size = each.value.memory_size
# Per-function env vars plus shared table/region settings.
environment {
variables = merge(each.value.environment_variables, {
DYNAMODB_TABLE = aws_dynamodb_table.main.name
REGION = data.aws_region.current.name
})
}
# VPC attachment — needed for private resource access; note the execution
# role then also needs ENI permissions (AWSLambdaVPCAccessExecutionRole).
vpc_config {
subnet_ids = var.lambda_subnet_ids
security_group_ids = [aws_security_group.lambda.id]
}
# Failed async invocations land in the DLQ for later inspection/replay.
dead_letter_config {
target_arn = aws_sqs_queue.dlq.arn
}
tags = var.tags
}
# GET /users — IAM-authorized, with optional pagination query parameters
# (false = not required).
resource "aws_api_gateway_method" "users_get" {
rest_api_id = aws_api_gateway_rest_api.main.id
resource_id = aws_api_gateway_resource.users.id
http_method = "GET"
authorization = "AWS_IAM"
request_parameters = {
"method.request.querystring.limit" = false
"method.request.querystring.offset" = false
}
}
# Lambda proxy integration: the full request is passed to the function.
# integration_http_method is always POST for Lambda invocations, regardless
# of the client-facing http_method.
resource "aws_api_gateway_integration" "users_get" {
rest_api_id = aws_api_gateway_rest_api.main.id
resource_id = aws_api_gateway_resource.users.id
http_method = aws_api_gateway_method.users_get.http_method
integration_http_method = "POST"
type = "AWS_PROXY"
uri = aws_lambda_function.api_functions["list_users"].invoke_arn
}
# API Gateway deployment. The stage itself is managed by
# aws_api_gateway_stage.main, so stage_name is NOT set here — setting both
# makes the deployment and the stage resource fight over the same stage.
resource "aws_api_gateway_deployment" "main" {
  rest_api_id = aws_api_gateway_rest_api.main.id

  # Redeploy only when the API surface actually changes. The previous
  # timestamp()-based stage variable forced a new deployment on every apply.
  triggers = {
    redeployment = sha1(jsonencode([
      aws_api_gateway_resource.users.id,
      aws_api_gateway_resource.user_id.id,
      aws_api_gateway_method.users_get.id,
      aws_api_gateway_integration.users_get.id,
      # Add other methods/integrations here so their changes redeploy the API
    ]))
  }

  # Keep the old deployment alive until the new one exists, so the stage is
  # never left pointing at a deleted deployment.
  lifecycle {
    create_before_destroy = true
  }
}
# Stage with structured JSON access logs and X-Ray tracing enabled.
resource "aws_api_gateway_stage" "main" {
deployment_id = aws_api_gateway_deployment.main.id
rest_api_id = aws_api_gateway_rest_api.main.id
stage_name = var.api_stage
# One JSON object per request; $context variables are resolved by API
# Gateway at log time.
access_log_settings {
destination_arn = aws_cloudwatch_log_group.api_gateway.arn
format = jsonencode({
requestId = "$context.requestId"
ip = "$context.identity.sourceIp"
caller = "$context.identity.caller"
user = "$context.identity.user"
requestTime = "$context.requestTime"
httpMethod = "$context.httpMethod"
resourcePath = "$context.resourcePath"
status = "$context.status"
protocol = "$context.protocol"
responseLength = "$context.responseLength"
})
}
xray_tracing_enabled = true
tags = var.tags
}
# Stage-wide method settings: "*/*" applies metrics, execution logging, and
# throttling limits to every method on the stage.
resource "aws_api_gateway_method_settings" "main" {
rest_api_id = aws_api_gateway_rest_api.main.id
stage_name = aws_api_gateway_stage.main.stage_name
method_path = "*/*"
settings {
metrics_enabled = true
logging_level = "INFO"
throttling_rate_limit = var.api_throttling_rate_limit
throttling_burst_limit = var.api_throttling_burst_limit
}
}
# DynamoDB table with streams and GSIs for alternate access patterns
# (lookup by email, range scans by creation time).
resource "aws_dynamodb_table" "main" {
  name         = "${var.application_name}-data"
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "id"

  # Streams feed the stream_processor Lambda with before/after images.
  stream_enabled   = true
  stream_view_type = "NEW_AND_OLD_IMAGES"

  # Only key attributes (table + index) are declared; other item attributes
  # are schemaless.
  attribute {
    name = "id"
    type = "S"
  }
  attribute {
    name = "email"
    type = "S"
  }
  attribute {
    name = "created_at"
    type = "S"
  }

  # projection_type is a required GSI argument — omitting it fails validation.
  # ALL copies every attribute into the index (higher storage, no fetch-backs).
  global_secondary_index {
    name            = "email-index"
    hash_key        = "email"
    projection_type = "ALL"
  }
  global_secondary_index {
    name            = "created-at-index"
    hash_key        = "created_at"
    projection_type = "ALL"
  }

  server_side_encryption {
    enabled     = true
    kms_key_arn = aws_kms_key.dynamodb.arn
  }

  point_in_time_recovery {
    enabled = true
  }

  tags = var.tags
}
# Wire the table's stream to the stream_processor Lambda: batches of up to 10
# records (or 5s max wait), read from the stream tip, with 2 concurrent
# batches per shard.
resource "aws_lambda_event_source_mapping" "dynamodb_stream" {
event_source_arn = aws_dynamodb_table.main.stream_arn
function_name = aws_lambda_function.api_functions["stream_processor"].arn
starting_position = "LATEST"
maximum_batching_window_in_seconds = 5
batch_size = 10
parallelization_factor = 2
}
Data Pipeline with S3, Lambda, and RDS
A data processing pipeline that demonstrates event-driven architecture:
# Ingestion bucket; random suffix keeps the globally-unique bucket name
# collision-free across environments.
resource "aws_s3_bucket" "data_ingestion" {
bucket = "${var.application_name}-data-ingestion-${random_id.bucket_suffix.hex}"
tags = var.tags
}
# Trigger the processor only for JSON files landing under incoming/.
resource "aws_s3_bucket_notification" "data_ingestion" {
bucket = aws_s3_bucket.data_ingestion.id
lambda_function {
lambda_function_arn = aws_lambda_function.data_processor.arn
events = ["s3:ObjectCreated:*"]
filter_prefix = "incoming/"
filter_suffix = ".json"
}
# The Lambda resource policy must allow S3 before the notification is
# created, or the PutBucketNotification call fails.
depends_on = [aws_lambda_permission.s3_invoke]
}
# Data-processing Lambda: consumes S3 ingestion events, writes results to
# the analytics RDS instance / processed-data bucket, coordinates via SQS.
resource "aws_lambda_function" "data_processor" {
filename = "data_processor.zip"
function_name = "${var.application_name}-data-processor"
role = aws_iam_role.data_processor.arn
handler = "index.handler"
runtime = "python3.9"
timeout = 300
memory_size = 1024
# Downstream targets are injected via environment variables.
environment {
variables = {
RDS_ENDPOINT = aws_db_instance.analytics.endpoint
RDS_DATABASE = aws_db_instance.analytics.db_name
S3_BUCKET = aws_s3_bucket.processed_data.bucket
SQS_QUEUE = aws_sqs_queue.processing_queue.url
}
}
# Runs inside the VPC so it can reach the private RDS instance.
vpc_config {
subnet_ids = var.lambda_subnet_ids
security_group_ids = [aws_security_group.lambda_data_processor.id]
}
# Failed async invocations are parked in the DLQ for inspection/replay.
dead_letter_config {
target_arn = aws_sqs_queue.processing_dlq.arn
}
tags = var.tags
}
# PostgreSQL instance for analytics workloads. Encrypted at rest, with
# enhanced monitoring and Performance Insights; HA and delete protection are
# conditional on production.
resource "aws_db_instance" "analytics" {
identifier = "${var.application_name}-analytics"
engine = "postgres"
engine_version = "14.9"
instance_class = var.analytics_db_instance_class
allocated_storage = var.analytics_db_storage
# max_allocated_storage enables storage autoscaling up to this ceiling.
max_allocated_storage = var.analytics_db_max_storage
storage_type = "gp3"
storage_encrypted = true
kms_key_id = aws_kms_key.rds.arn
db_name = "analytics"
username = "analytics_user"
# Generated password; NOTE(review): it is stored in Terraform state — make
# sure the state backend is encrypted/access-controlled.
password = random_password.analytics_db.result
db_subnet_group_name = aws_db_subnet_group.analytics.name
vpc_security_group_ids = [aws_security_group.analytics_db.id]
backup_retention_period = 7
backup_window = "03:00-04:00"
maintenance_window = "sun:04:00-sun:05:00"
# Standby replica, delete protection, and mandatory final snapshot apply
# only in production.
multi_az = var.environment == "production"
# Enhanced monitoring at 60s granularity via the dedicated monitoring role.
monitoring_interval = 60
monitoring_role_arn = aws_iam_role.rds_monitoring.arn
performance_insights_enabled = true
performance_insights_kms_key_id = aws_kms_key.rds.arn
deletion_protection = var.environment == "production"
skip_final_snapshot = var.environment != "production"
tags = var.tags
}
# Work queue for processing coordination. Long polling (20s) cuts empty
# receives; messages are kept the maximum 14 days.
resource "aws_sqs_queue" "processing_queue" {
name = "${var.application_name}-processing-queue"
delay_seconds = 0
max_message_size = 262144
message_retention_seconds = 1209600 # 14 days
receive_wait_time_seconds = 20
# After 3 failed receives a message moves to the DLQ instead of looping.
redrive_policy = jsonencode({
deadLetterTargetArn = aws_sqs_queue.processing_dlq.arn
maxReceiveCount = 3
})
tags = var.tags
}
# Dead-letter queue for poison messages from the processing queue.
resource "aws_sqs_queue" "processing_dlq" {
name = "${var.application_name}-processing-dlq"
tags = var.tags
}
# EventBridge rule matching custom "Data Processing Complete" events emitted
# by the pipeline (source custom.dataprocessing).
resource "aws_cloudwatch_event_rule" "data_processing_workflow" {
name = "${var.application_name}-data-processing-workflow"
description = "Orchestrate data processing workflow"
event_pattern = jsonencode({
source = ["custom.dataprocessing"]
detail-type = ["Data Processing Complete"]
})
tags = var.tags
}
# Invoke the analytics Lambda when the rule matches.
# NOTE(review): EventBridge also needs an aws_lambda_permission granting
# events.amazonaws.com invoke on this function — presumably defined
# elsewhere; verify it exists.
resource "aws_cloudwatch_event_target" "start_analytics" {
rule = aws_cloudwatch_event_rule.data_processing_workflow.name
target_id = "StartAnalyticsTarget"
arn = aws_lambda_function.analytics_processor.arn
}
# Step Functions state machine: ProcessData -> CheckProcessingResult ->
# RunAnalytics on success, ProcessingFailed otherwise.
resource "aws_sfn_state_machine" "data_pipeline" {
name = "${var.application_name}-data-pipeline"
role_arn = aws_iam_role.step_functions.arn
definition = jsonencode({
Comment = "Data processing pipeline"
StartAt = "ProcessData"
States = {
ProcessData = {
Type = "Task"
Resource = aws_lambda_function.data_processor.arn
Next = "CheckProcessingResult"
# Retry transient Lambda-service failures with exponential backoff
# (2s, 4s, 8s, ... up to 6 attempts).
Retry = [
{
ErrorEquals = ["Lambda.ServiceException", "Lambda.AWSLambdaException", "Lambda.SdkClientException"]
IntervalSeconds = 2
MaxAttempts = 6
BackoffRate = 2
}
]
}
# Branch on the $.status field returned by the processor.
CheckProcessingResult = {
Type = "Choice"
Choices = [
{
Variable = "$.status"
StringEquals = "SUCCESS"
Next = "RunAnalytics"
}
]
Default = "ProcessingFailed"
}
RunAnalytics = {
Type = "Task"
Resource = aws_lambda_function.analytics_processor.arn
End = true
}
# Terminal failure state — marks the whole execution as failed.
ProcessingFailed = {
Type = "Fail"
Cause = "Data processing failed"
}
}
})
tags = var.tags
}
Multi-Service Integration with Service Discovery
Complex service integration using AWS Cloud Map:
# Private Cloud Map DNS namespace: services resolve as
# <service>.<application_name>.local inside this VPC only.
resource "aws_service_discovery_private_dns_namespace" "main" {
name = "${var.application_name}.local"
description = "Service discovery for ${var.application_name}"
vpc = var.vpc_id
tags = var.tags
}
# Cloud Map services — one DNS record set per microservice in var.services.
resource "aws_service_discovery_service" "services" {
  for_each = var.services

  name = each.key

  dns_config {
    namespace_id = aws_service_discovery_private_dns_namespace.main.id
    # Short-TTL A records so task churn is reflected quickly.
    dns_records {
      ttl  = 10
      type = "A"
    }
    # Return all healthy instance IPs; clients do client-side load balancing.
    routing_policy = "MULTIVALUE"
  }

  # Let ECS push container health into Cloud Map (Route 53 health checks are
  # not supported in private DNS namespaces). NOTE:
  # health_check_grace_period_seconds is not a valid argument on this
  # resource — the startup grace period belongs on aws_ecs_service.
  health_check_custom_config {
    failure_threshold = 1
  }

  tags = var.tags
}
# One ECS service per microservice, registered in both Cloud Map (internal
# DNS) and the ALB (external path-based routing).
resource "aws_ecs_service" "microservices" {
for_each = var.services
name = each.key
cluster = aws_ecs_cluster.main.id
task_definition = aws_ecs_task_definition.microservices[each.key].arn
desired_count = each.value.desired_count
# network_configuration implies awsvpc networking — each task gets its own
# ENI in the private subnets.
network_configuration {
security_groups = [aws_security_group.microservices[each.key].id]
subnets = var.private_subnet_ids
}
# Register task IPs in Cloud Map for service-to-service discovery.
service_registries {
registry_arn = aws_service_discovery_service.services[each.key].arn
}
load_balancer {
target_group_arn = aws_lb_target_group.microservices[each.key].arn
container_name = each.key
container_port = each.value.port
}
# The listener must exist before ECS can attach the target group.
depends_on = [aws_lb_listener.microservices]
tags = var.tags
}
# Internet-facing ALB in the public subnets; traffic is routed to services
# by path-based listener rules.
resource "aws_lb" "microservices" {
name = "${var.application_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.alb.id]
subnets = var.public_subnet_ids
# Guard against accidental deletion in production only.
enable_deletion_protection = var.environment == "production"
tags = var.tags
}
# HTTPS listener terminating TLS with the provided ACM certificate.
resource "aws_lb_listener" "microservices" {
load_balancer_arn = aws_lb.microservices.arn
port = "443"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-2017-01"
certificate_arn = var.certificate_arn
# Requests matching no listener rule get a plain 404 instead of hitting an
# arbitrary default target group.
default_action {
type = "fixed-response"
fixed_response {
content_type = "text/plain"
message_body = "Service not found"
status_code = "404"
}
}
}
# Path-based routing: each service claims its path patterns at a distinct
# priority (priorities must be unique per listener).
resource "aws_lb_listener_rule" "microservices" {
for_each = var.services
listener_arn = aws_lb_listener.microservices.arn
priority = each.value.priority
action {
type = "forward"
target_group_arn = aws_lb_target_group.microservices[each.key].arn
}
condition {
path_pattern {
values = each.value.path_patterns
}
}
}
Final Integration Example
A complete example that brings together multiple services:
# Root-module composition: a single module call wiring networking, EKS,
# database, serverless, monitoring, and security toggles together.
module "complete_application" {
source = "./modules/complete-application"
# Basic configuration
application_name = "my-app"
environment = "production"
# Network configuration — consumed from the VPC module's outputs.
vpc_id = module.vpc.vpc_id
private_subnet_ids = module.vpc.private_subnet_ids
public_subnet_ids = module.vpc.public_subnet_ids
# EKS configuration: an on-demand baseline pool plus a burstable spot pool.
enable_eks = true
eks_config = {
kubernetes_version = "1.28"
node_groups = {
general = {
instance_types = ["t3.medium", "t3.large"]
capacity_type = "ON_DEMAND"
desired_size = 3
max_size = 10
min_size = 1
}
# Spot pool can scale to zero; multiple instance types improve spot
# availability.
spot = {
instance_types = ["t3.medium", "t3.large", "t3.xlarge"]
capacity_type = "SPOT"
desired_size = 2
max_size = 20
min_size = 0
}
}
}
# Database configuration
database_config = {
engine = "postgres"
instance_class = "db.r5.large"
multi_az = true
backup_retention_period = 7
}
# Serverless configuration
enable_serverless = true
lambda_functions = {
api_handler = {
runtime = "python3.9"
handler = "app.handler"
memory_size = 512
timeout = 30
}
data_processor = {
runtime = "python3.9"
handler = "processor.handler"
memory_size = 1024
timeout = 300
}
}
# Monitoring configuration
monitoring_config = {
enable_detailed_monitoring = true
log_retention_days = 30
enable_xray_tracing = true
}
# Security configuration: account-level detection/compliance services.
security_config = {
enable_guardduty = true
enable_security_hub = true
enable_config = true
}
tags = {
Environment = "production"
Project = "my-app"
ManagedBy = "terraform"
}
}
Conclusion
This comprehensive guide has covered the essential patterns for using Terraform with AWS, from basic provider setup to complex multi-service architectures. The key to success with AWS and Terraform is understanding not just the individual services, but how they work together to create reliable, scalable, and secure systems.
The patterns and practices covered in this guide provide a foundation for building production-ready AWS infrastructure that scales with your organization’s needs while maintaining security, compliance, and operational excellence.