Advanced Docker Compose Techniques and Patterns
This section explores sophisticated Docker Compose patterns for enterprise applications, including service mesh integration, advanced networking, and complex orchestration scenarios.
Service Mesh Integration with Envoy
version: '3.8'
services:
  # Envoy front proxy (shared data-plane entry point for the mesh)
envoy:
image: envoyproxy/envoy:v1.24.0
ports:
- "10000:10000"
- "9901:9901"
volumes:
- ./envoy.yaml:/etc/envoy/envoy.yaml
command: /usr/local/bin/envoy -c /etc/envoy/envoy.yaml
# Service A with Sidecar
service-a:
build: ./service-a
environment:
- SERVICE_NAME=service-a
- ENVOY_ADMIN_PORT=9901
depends_on:
- envoy
service-a-envoy:
image: envoyproxy/envoy:v1.24.0
volumes:
- ./envoy-sidecar-a.yaml:/etc/envoy/envoy.yaml
network_mode: "service:service-a"
depends_on:
- service-a
# Service B with Sidecar
service-b:
build: ./service-b
environment:
- SERVICE_NAME=service-b
depends_on:
- envoy
service-b-envoy:
image: envoyproxy/envoy:v1.24.0
volumes:
- ./envoy-sidecar-b.yaml:/etc/envoy/envoy.yaml
network_mode: "service:service-b"
depends_on:
- service-b
networks:
default:
driver: bridge
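The Compose file above mounts an envoy.yaml that is not shown. A minimal static front-proxy configuration might look like the sketch below; the path-prefix routes and the assumption that service-a and service-b listen on port 8080 are illustrative, not taken from the original setup. The sidecar files (envoy-sidecar-a.yaml, envoy-sidecar-b.yaml) would typically follow the same structure, with the local service as the only cluster.
# envoy.yaml -- minimal front-proxy sketch (Envoy v3 API); adjust routes and ports to your services
admin:
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }
static_resources:
  listeners:
    - name: ingress
      address:
        socket_address: { address: 0.0.0.0, port_value: 10000 }
      filter_chains:
        - filters:
            - name: envoy.filters.network.http_connection_manager
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                stat_prefix: ingress_http
                route_config:
                  name: local_route
                  virtual_hosts:
                    - name: services
                      domains: ["*"]
                      routes:
                        - match: { prefix: "/service-a" }
                          route: { cluster: service_a }
                        - match: { prefix: "/service-b" }
                          route: { cluster: service_b }
                http_filters:
                  - name: envoy.filters.http.router
                    typed_config:
                      "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
  clusters:
    - name: service_a
      type: STRICT_DNS
      load_assignment:
        cluster_name: service_a
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address: { address: service-a, port_value: 8080 }
    - name: service_b
      type: STRICT_DNS
      load_assignment:
        cluster_name: service_b
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address: { address: service-b, port_value: 8080 }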
Advanced Networking Patterns
Multi-Tier Network Architecture
version: '3.8'
services:
# Load Balancer Tier
haproxy:
image: haproxy:2.6
ports:
- "80:80"
- "443:443"
- "8404:8404" # Stats
volumes:
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
networks:
- frontend
depends_on:
- web1
- web2
# Web Tier
web1:
build: ./web
networks:
- frontend
- backend
environment:
- INSTANCE_ID=web1
web2:
build: ./web
networks:
- frontend
- backend
environment:
- INSTANCE_ID=web2
# Application Tier
app1:
build: ./app
networks:
- backend
- database
environment:
- INSTANCE_ID=app1
app2:
build: ./app
networks:
- backend
- database
environment:
- INSTANCE_ID=app2
# Database Tier
  db-master:
    # The official postgres image does not implement these replication variables,
    # so the Bitnami PostgreSQL image is used here instead (see its docs for the full variable set).
    image: bitnami/postgresql:13
    networks:
      - database
    environment:
      - POSTGRESQL_REPLICATION_MODE=master
      - POSTGRESQL_REPLICATION_USER=replicator
      - POSTGRESQL_REPLICATION_PASSWORD=replicator_password
      - POSTGRESQL_PASSWORD=app_password   # example value; use secrets or an env file in production
    volumes:
      - db_master_data:/bitnami/postgresql
  db-slave:
    image: bitnami/postgresql:13
    networks:
      - database
    environment:
      - POSTGRESQL_REPLICATION_MODE=slave
      - POSTGRESQL_MASTER_HOST=db-master
      - POSTGRESQL_MASTER_PORT_NUMBER=5432
      - POSTGRESQL_REPLICATION_USER=replicator
      - POSTGRESQL_REPLICATION_PASSWORD=replicator_password
      - POSTGRESQL_PASSWORD=app_password
    depends_on:
      - db-master
networks:
frontend:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/24
backend:
driver: bridge
internal: true
ipam:
config:
- subnet: 172.21.0.0/24
database:
driver: bridge
internal: true
ipam:
config:
- subnet: 172.22.0.0/24
volumes:
db_master_data:
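The list-form depends_on above only controls start order. If HAProxy should wait until the web tier actually answers requests, a health gate can be layered on via a docker-compose.override.yml such as the sketch below; the /health path and port 8080 are assumptions about the ./web image, not part of the original stack.
# docker-compose.override.yml -- optional health gate for the load-balancer tier
services:
  web1:
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:8080/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 5
  web2:
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:8080/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 5
  haproxy:
    depends_on:
      web1:
        condition: service_healthy
      web2:
        condition: service_healthy
Compose merges an override file with the base file automatically on docker compose up, so the base definition stays unchanged.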
Network Policies and Security
version: '3.8'
services:
# DMZ Services
nginx:
image: nginx:alpine
ports:
- "80:80"
- "443:443"
networks:
dmz:
ipv4_address: 172.30.1.10
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
      - NET_BIND_SERVICE   # needed to bind ports 80/443 once ALL capabilities are dropped
    read_only: true
    tmpfs:
      - /var/cache/nginx:noexec,nosuid,size=100m
      - /var/run:noexec,nosuid,size=10m   # nginx writes its PID file here; required with read_only
# Application Services
api:
build: ./api
networks:
app_tier:
ipv4_address: 172.30.2.10
security_opt:
- no-new-privileges:true
user: "1000:1000"
read_only: true
tmpfs:
- /tmp:noexec,nosuid,size=50m
# Database Services
postgres:
image: postgres:13
networks:
data_tier:
ipv4_address: 172.30.3.10
    environment:
      # The official postgres image refuses to start without a superuser password.
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD must be set}
    security_opt:
      - no-new-privileges:true
    user: postgres
volumes:
- postgres_data:/var/lib/postgresql/data:Z
networks:
dmz:
driver: bridge
ipam:
config:
- subnet: 172.30.1.0/24
app_tier:
driver: bridge
internal: true
ipam:
config:
- subnet: 172.30.2.0/24
data_tier:
driver: bridge
internal: true
ipam:
config:
- subnet: 172.30.3.0/24
volumes:
postgres_data:
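One further hardening step worth showing: the postgres service above takes its password from an environment variable. Compose file-based secrets keep it out of the container environment entirely. The sketch below is a hypothetical variant that assumes a ./secrets/db_password.txt file and uses the official image's POSTGRES_PASSWORD_FILE convention.
# Hypothetical secrets-based variant of the postgres service
services:
  postgres:
    image: postgres:13
    secrets:
      - db_password
    environment:
      - POSTGRES_PASSWORD_FILE=/run/secrets/db_password
secrets:
  db_password:
    file: ./secrets/db_password.txt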
Complex Orchestration Patterns
Event-Driven Architecture
version: '3.8'
services:
# Event Bus
kafka:
    image: confluentinc/cp-kafka:7.4.0   # pin a specific version; "latest" drifts
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
depends_on:
- zookeeper
zookeeper:
    image: confluentinc/cp-zookeeper:7.4.0
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
# Event Producers
order-service:
build: ./services/order
environment:
- KAFKA_BROKERS=kafka:9092
- DATABASE_URL=postgresql://user:pass@order-db:5432/orders
depends_on:
- kafka
- order-db
# Event Consumers
inventory-service:
build: ./services/inventory
environment:
- KAFKA_BROKERS=kafka:9092
- DATABASE_URL=postgresql://user:pass@inventory-db:5432/inventory
depends_on:
- kafka
- inventory-db
notification-service:
build: ./services/notification
environment:
- KAFKA_BROKERS=kafka:9092
- SMTP_HOST=mailhog
- SMTP_PORT=1025
depends_on:
- kafka
- mailhog
# Event Processing
analytics-processor:
build: ./processors/analytics
environment:
- KAFKA_BROKERS=kafka:9092
- ELASTICSEARCH_URL=http://elasticsearch:9200
depends_on:
- kafka
- elasticsearch
# Supporting Services
order-db:
image: postgres:13
environment:
POSTGRES_DB: orders
POSTGRES_USER: user
POSTGRES_PASSWORD: pass
inventory-db:
image: postgres:13
environment:
POSTGRES_DB: inventory
POSTGRES_USER: user
POSTGRES_PASSWORD: pass
  elasticsearch:
    image: elasticsearch:7.17.0
    environment:
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"   # cap the heap for local development
mailhog:
image: mailhog/mailhog
ports:
- "8025:8025"
CQRS Pattern Implementation
version: '3.8'
services:
# Command Side
command-api:
build: ./command-api
ports:
- "8080:8080"
environment:
- DATABASE_URL=postgresql://user:pass@write-db:5432/commands
- EVENT_STORE_URL=http://eventstore:2113
depends_on:
- write-db
- eventstore
# Query Side
query-api:
build: ./query-api
ports:
- "8081:8081"
environment:
- DATABASE_URL=postgresql://user:pass@read-db:5432/queries
- REDIS_URL=redis://redis:6379
depends_on:
- read-db
- redis
# Event Store
eventstore:
image: eventstore/eventstore:21.10.0-buster-slim
ports:
- "2113:2113"
    environment:
      - EVENTSTORE_CLUSTER_SIZE=1
      - EVENTSTORE_RUN_PROJECTIONS=All
      - EVENTSTORE_START_STANDARD_PROJECTIONS=true
      - EVENTSTORE_INSECURE=true   # development only; secured clusters need certificates
volumes:
- eventstore_data:/var/lib/eventstore
# Projection Processors
projection-processor:
build: ./projection-processor
environment:
- EVENT_STORE_URL=http://eventstore:2113
- READ_DATABASE_URL=postgresql://user:pass@read-db:5432/queries
depends_on:
- eventstore
- read-db
# Databases
write-db:
image: postgres:13
environment:
POSTGRES_DB: commands
POSTGRES_USER: user
POSTGRES_PASSWORD: pass
volumes:
- write_db_data:/var/lib/postgresql/data
read-db:
image: postgres:13
environment:
POSTGRES_DB: queries
POSTGRES_USER: user
POSTGRES_PASSWORD: pass
volumes:
- read_db_data:/var/lib/postgresql/data
redis:
image: redis:7-alpine
volumes:
- redis_data:/data
volumes:
eventstore_data:
write_db_data:
read_db_data:
redis_data:
Advanced Volume and Storage Patterns
Distributed Storage with GlusterFS
version: '3.8'
services:
# GlusterFS Nodes
gluster1:
image: gluster/gluster-centos
privileged: true
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- gluster1_data:/data
hostname: gluster1
networks:
storage:
ipv4_address: 172.25.0.10
gluster2:
image: gluster/gluster-centos
privileged: true
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- gluster2_data:/data
hostname: gluster2
networks:
storage:
ipv4_address: 172.25.0.11
# Application using distributed storage
app:
build: ./app
    volumes:
      # driver and driver_opts belong on the top-level volume definition (see below),
      # not on the service-level mount
      - distributed_storage:/app/data
depends_on:
- gluster1
- gluster2
networks:
- storage
- app
networks:
storage:
driver: bridge
ipam:
config:
- subnet: 172.25.0.0/24
app:
driver: bridge
volumes:
gluster1_data:
gluster2_data:
  # Driver options are defined here rather than marking the volume external, so
  # Compose creates the mount itself; the Docker host needs the GlusterFS FUSE client installed.
  distributed_storage:
    driver: local
    driver_opts:
      type: glusterfs
      # option names can vary slightly between GlusterFS versions
      o: "backup-volfile-servers=172.25.0.11"
      device: "172.25.0.10:/gluster-volume"
Backup and Disaster Recovery
version: '3.8'
services:
# Primary Application
app:
build: ./app
volumes:
- app_data:/data
environment:
- BACKUP_ENABLED=true
- BACKUP_SCHEDULE=0 2 * * *
# Backup Service
backup:
image: alpine
volumes:
- app_data:/source:ro
- backup_storage:/backup
- ./backup-scripts:/scripts:ro
environment:
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
- S3_BUCKET=${BACKUP_S3_BUCKET}
command: |
sh -c "
apk add --no-cache aws-cli
while true; do
/scripts/backup.sh
sleep 86400
done
"
# Disaster Recovery Testing
dr-test:
build: ./app
    volumes:
      - dr_data:/data
      - backup_storage:/backup:ro
      - ./backup-scripts:/scripts:ro   # restore.sh and verify.sh live here
environment:
- RESTORE_MODE=true
profiles:
- disaster-recovery
command: |
sh -c "
echo 'Starting disaster recovery test...'
/scripts/restore.sh
/scripts/verify.sh
"
volumes:
app_data:
backup_storage:
dr_data:
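Because dr-test is gated behind the disaster-recovery profile, a plain docker compose up ignores it; the recovery drill only runs when the profile is activated, for example with docker compose --profile disaster-recovery up dr-test.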
Performance Optimization Patterns
Connection Pooling and Caching
version: '3.8'
services:
# Application with Connection Pooling
app:
build: ./app
environment:
- DATABASE_URL=postgresql://user:pass@pgbouncer:5432/myapp
- REDIS_URL=redis://redis-cluster:6379
depends_on:
- pgbouncer
- redis-cluster
  # PgBouncer Connection Pooler
  # Note: the accepted environment variables and the listen port differ between
  # PgBouncer images; check the documentation of the image you deploy.
  pgbouncer:
    image: pgbouncer/pgbouncer:latest
environment:
- DATABASES_HOST=postgres
- DATABASES_PORT=5432
- DATABASES_USER=user
- DATABASES_PASSWORD=pass
- DATABASES_DBNAME=myapp
- POOL_MODE=transaction
- MAX_CLIENT_CONN=100
- DEFAULT_POOL_SIZE=25
depends_on:
- postgres
  # Redis with cluster mode enabled, used for caching.
  # A single node like this is only for local testing; a real Redis Cluster
  # needs at least three master nodes.
  redis-cluster:
image: redis:7-alpine
command: |
sh -c "
redis-server --cluster-enabled yes \
--cluster-config-file nodes.conf \
--cluster-node-timeout 5000 \
--appendonly yes \
--maxmemory 256mb \
--maxmemory-policy allkeys-lru
"
volumes:
- redis_data:/data
# Database
postgres:
image: postgres:13
environment:
POSTGRES_DB: myapp
POSTGRES_USER: user
POSTGRES_PASSWORD: pass
volumes:
- postgres_data:/var/lib/postgresql/data
- ./postgresql.conf:/etc/postgresql/postgresql.conf
command: postgres -c config_file=/etc/postgresql/postgresql.conf
volumes:
redis_data:
postgres_data:
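Connection pooling and caching take pressure off the database, but it also helps to cap what each container may consume so one service cannot starve the rest. Compose applies deploy.resources limits even outside Swarm mode; the sketch below uses illustrative values, not tuned recommendations, and could live in a docker-compose.override.yml.
# Hypothetical resource caps for the performance stack
services:
  postgres:
    deploy:
      resources:
        limits:
          cpus: "2.0"
          memory: 2G
  redis-cluster:
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 512M
  app:
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 512M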
Summary
This section covered advanced Docker Compose techniques:
Enterprise Patterns
- Service Mesh: Envoy proxy integration for microservices communication
- Multi-Tier Architecture: Proper network segmentation and security
- Event-Driven Systems: Kafka-based event processing and CQRS patterns
Advanced Networking
- Network Policies: Security-focused network configuration
- Service Discovery and Load Balancing: Compose DNS-based discovery behind an HAProxy front tier
- Network Isolation: DMZ and internal network separation
Storage and Performance
- Distributed Storage: GlusterFS integration for scalable storage
- Disaster Recovery: Automated backup and recovery testing
- Performance Optimization: Connection pooling and caching strategies
Next Steps: Part 5 focuses on best practices and optimization techniques for production-ready Docker Compose deployments, including security hardening, monitoring, and operational excellence.