Practical Security and Optimization Applications

This section demonstrates real-world Docker deployments: an enterprise security stack for financial services, high-performance computing and e-commerce optimization case studies, and a real-time security operations (SOC) monitor.

Enterprise Security Implementation

Financial Services Security Stack

# docker-compose.financial-security.yml
version: '3.8'

services:
  # Web Application Firewall
  waf:
    image: owasp/modsecurity-crs:nginx
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./waf/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./waf/modsecurity.conf:/etc/modsecurity/modsecurity.conf:ro
      - ./waf/crs-setup.conf:/etc/modsecurity/crs/crs-setup.conf:ro
      - waf-logs:/var/log/nginx
    networks:
      - dmz
      - app-tier   # the WAF must share a network with the app it fronts
    environment:
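      # Paranoia level 2 enables stricter CRS rule sets; requests whose anomaly
      # score exceeds the inbound/outbound thresholds are blocked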
      - PARANOIA=2
      - ANOMALY_INBOUND=5
      - ANOMALY_OUTBOUND=4

  # Application with strict security
  trading-app:
    build: ./trading-app
    networks:
      - app-tier
      - db-tier    # the app needs a route to the database tier
    volumes:
      - trading-data:/app/data:ro
      - audit-logs:/app/logs
    environment:
      - ENCRYPTION_KEY_FILE=/run/secrets/encryption_key
      - DATABASE_URL_FILE=/run/secrets/db_connection
      - AUDIT_ENABLED=true
      - COMPLIANCE_MODE=PCI_DSS
    secrets:
      - encryption_key
      - db_connection
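    # Hardening: block privilege escalation, confine the process with a custom
    # AppArmor profile (it must already be loaded on the host), and drop all
    # Linux capabilities except the three the entrypoint needs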
    security_opt:
      - no-new-privileges:true
      - apparmor:trading-app-profile
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    read_only: true
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=100m
    user: "1000:1000"
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: '1.0'
        reservations:
          memory: 1G
          cpus: '0.5'

  # Secure Database
  secure-db:
    image: postgres:14
    networks:
      - db-tier
    volumes:
      - secure-db-data:/var/lib/postgresql/data
      - secure-db-config:/etc/postgresql:ro
    environment:
      - POSTGRES_DB=trading
      - POSTGRES_USER=trading_user
      - POSTGRES_PASSWORD_FILE=/run/secrets/db_password
      - POSTGRES_SSL_MODE=require
    secrets:
      - db_password
    # NOTE: the referenced server.crt/server.key must be mounted into the
    # container (e.g. via secrets or a read-only volume) -- not shown here
    command: |
      postgres
      -c ssl=on
      -c ssl_cert_file=/etc/ssl/certs/server.crt
      -c ssl_key_file=/etc/ssl/private/server.key
      -c log_statement=all
      -c log_connections=on
      -c log_disconnections=on
      -c log_checkpoints=on
      -c log_lock_waits=on

  # HSM (Hardware Security Module) Simulator
  hsm:
    image: softhsm:latest  # placeholder -- no official image exists; build one from SoftHSMv2
    networks:
      - security-tier
    volumes:
      - hsm-data:/var/lib/softhsm
    environment:
      - SOFTHSM2_CONF=/etc/softhsm2.conf
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - ALL

  # Compliance Scanner
  compliance-scanner:
    build: ./compliance-scanner
    networks:
      - monitoring
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
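      # :ro limits filesystem operations only -- the API behind the socket is
      # still fully writable, so consider a socket proxy in production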
      - compliance-reports:/reports
    environment:
      - SCAN_SCHEDULE=0 */6 * * *
      - COMPLIANCE_STANDARDS=PCI_DSS,SOX,GDPR
      - ALERT_WEBHOOK=${COMPLIANCE_WEBHOOK_URL}

networks:
  dmz:
    driver: bridge
    driver_opts:
      com.docker.network.bridge.enable_icc: "false"
  app-tier:
    driver: bridge
    internal: true
  db-tier:
    driver: bridge
    internal: true
  security-tier:
    driver: bridge
    internal: true
  monitoring:
    driver: bridge

volumes:
  waf-logs:
  trading-data:
  audit-logs:
  secure-db-data:
    driver: local
    driver_opts:
      type: ext4
      device: /dev/xvdf  # the local driver needs a real block device; adjust to the host
      o: noatime,nodev,nosuid
  secure-db-config:
  hsm-data:
  compliance-reports:

secrets:
  encryption_key:
    external: true
  db_connection:
    external: true
  db_password:
    external: true
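
These secrets are declared external, so they must exist before the stack starts, and the secrets and deploy keys above assume Swarm mode. A minimal bootstrap sketch (the AppArmor profile path and connection string are illustrative assumptions):

#!/usr/bin/env bash
# bootstrap-financial-stack.sh -- hypothetical helper, not part of the stack
set -euo pipefail

# Load the AppArmor profile referenced by the trading-app service
sudo apparmor_parser -r ./apparmor/trading-app-profile

# Create the external secrets consumed by the compose file (Swarm mode only)
openssl rand -base64 32 | docker secret create encryption_key -
openssl rand -base64 24 | docker secret create db_password -
printf 'postgresql://trading_user@secure-db:5432/trading' | docker secret create db_connection -

# Deploy as a stack so secrets and deploy.resources take effect
docker stack deploy -c docker-compose.financial-security.yml financial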

High-Performance Computing Optimization

Scientific Computing Stack

# docker-compose.hpc-optimized.yml
version: '3.8'

services:
  # Compute Node with GPU Support
  compute-node:
    build: ./compute-node
    runtime: nvidia
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - CUDA_VISIBLE_DEVICES=0,1
      - OMP_NUM_THREADS=16
      - MKL_NUM_THREADS=16
    volumes:
      - compute-data:/data
      - compute-scratch:/scratch
      - type: tmpfs
        target: /tmp
        tmpfs:
          size: 8G
    networks:
      - compute-network
      - storage-network   # compute nodes must reach the Gluster bricks
    deploy:
      resources:
        limits:
          memory: 32G
          cpus: '16.0'
        reservations:
          memory: 16G
          cpus: '8.0'
    sysctls:
      - kernel.shmmax=68719476736
      - kernel.shmall=4294967296
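    # Unlimited memlock lets MPI/RDMA libraries pin buffers in RAM; the 64 MB
    # stack limit accommodates deep per-thread call stacks in OpenMP workers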
    ulimits:
      memlock:
        soft: -1
        hard: -1
      stack:
        soft: 67108864
        hard: 67108864

  # High-Performance Storage
  storage-node:
    image: gluster/gluster-centos
    privileged: true
    networks:
      - storage-network
    volumes:
      - gluster-data:/data
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    environment:
      - GLUSTER_VOLUME_NAME=compute-volume
      - GLUSTER_REPLICA_COUNT=3

  # Message Passing Interface (MPI) Coordinator
  mpi-coordinator:
    build: ./mpi-coordinator
    networks:
      - compute-network
    volumes:
      - mpi-config:/etc/mpi
    environment:
      - MPI_HOSTS=compute-node-1,compute-node-2,compute-node-3
      - MPI_SLOTS_PER_HOST=16
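    # 3 hosts x 16 slots = 48 ranks, one per core, pinned with --bind-to core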
    command: |
      mpirun --allow-run-as-root \
             --hostfile /etc/mpi/hostfile \
             --np 48 \
             --map-by node \
             --bind-to core \
             /app/compute-job

  # Performance Monitor
  perf-monitor:
    build: ./perf-monitor
    privileged: true
    networks:
      - monitoring
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - perf-data:/data
    environment:
      - MONITOR_INTERVAL=1
      - METRICS=cpu,memory,network,gpu,storage
    command: |
      sh -c "
        while true; do
          perf stat -a -e cycles,instructions,cache-misses,branch-misses sleep 1
          nvidia-smi --query-gpu=utilization.gpu,memory.used --format=csv,noheader,nounits
          iostat -x 1 1
        done
      "

networks:
  compute-network:
    driver: bridge
    driver_opts:
      com.docker.network.bridge.name: compute-br
      com.docker.network.mtu: 9000
  storage-network:
    driver: bridge
    driver_opts:
      com.docker.network.mtu: 9000
  monitoring:
    driver: bridge

volumes:
  compute-data:
    driver: local
    driver_opts:
      type: ext4
      device: /dev/nvme0n1  # the local driver needs a real block device; adjust to the host
      o: noatime,nodiratime,data=writeback
  compute-scratch:
    driver: local
    driver_opts:
      type: tmpfs
      device: tmpfs
      o: size=16G,noatime
  gluster-data:
  mpi-config:
  perf-data:
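
Two host-level assumptions in this file are easy to get wrong: the NVIDIA container runtime and jumbo frames (MTU 9000) on the compute and storage networks. A quick preflight sketch (the CUDA image tag is illustrative):

#!/usr/bin/env bash
# hpc-preflight.sh -- sanity checks before submitting compute jobs
set -euo pipefail

# 1. The nvidia runtime should expose the GPUs to containers
docker run --rm --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all \
  nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi

# 2. The container-side interface should report mtu 9000
#    (compose prefixes the network name with the project name)
docker run --rm --network "$(docker network ls -qf name=compute-network)" \
  busybox ip link show eth0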

Real-Time Security Monitoring

Security Operations Center (SOC)

#!/usr/bin/env python3
# soc-monitor.py

import asyncio
import docker
import json
import logging
import websockets
from datetime import datetime
from typing import Dict, List
import redis.asyncio as aioredis  # aioredis was merged into redis-py (>= 4.2)

class SOCMonitor:
    def __init__(self):
        self.docker_client = docker.from_env()
        self.redis = None
        self.websocket_clients = set()
        self.security_rules = self.load_security_rules()
        
    async def initialize(self):
        """Initialize async components"""
        self.redis = await aioredis.from_url("redis://localhost:6379")
        
    def load_security_rules(self) -> Dict:
        """Load security detection rules"""
        return {
            "suspicious_processes": [
                "nc", "netcat", "nmap", "wget", "curl", "python", "perl", "ruby"
            ],
            "suspicious_network": [
                {"port": 22, "protocol": "tcp", "direction": "outbound"},
                {"port": 3389, "protocol": "tcp", "direction": "outbound"},
                {"port": 4444, "protocol": "tcp", "direction": "any"}
            ],
            "file_integrity": [
                "/etc/passwd", "/etc/shadow", "/etc/hosts", "/etc/crontab"
            ],
            "resource_thresholds": {
                "cpu_percent": 90,
                "memory_percent": 95,
                "network_connections": 1000
            }
        }
    
    async def monitor_containers(self):
        """Monitor container security events"""
        while True:
            try:
                for container in self.docker_client.containers.list():
                    await self.analyze_container_security(container)
                await asyncio.sleep(5)
            except Exception as e:
                logging.error(f"Container monitoring error: {e}")
                await asyncio.sleep(10)
    
    async def analyze_container_security(self, container):
        """Analyze individual container for security issues"""
        try:
            # Check running processes
            processes = await self.get_container_processes(container)
            await self.check_suspicious_processes(container, processes)
            
            # Check network connections
            connections = await self.get_network_connections(container)
            await self.check_suspicious_network(container, connections)
            
            # Check resource usage
            stats = container.stats(stream=False)
            await self.check_resource_anomalies(container, stats)
            
            # Check file integrity
            await self.check_file_integrity(container)
            
        except Exception as e:
            logging.error(f"Error analyzing container {container.name}: {e}")
    
    async def get_container_processes(self, container) -> List[Dict]:
        """Get running processes in container"""
        try:
            result = container.exec_run("ps aux", demux=True)
            if result.exit_code == 0 and result.output[0]:
                lines = result.output[0].decode().strip().split('\n')[1:]  # Skip header
                processes = []
                for line in lines:
                    parts = line.split(None, 10)
                    if len(parts) >= 11:
                        processes.append({
                            'user': parts[0],
                            'pid': parts[1],
                            'cpu': parts[2],
                            'mem': parts[3],
                            'command': parts[10]
                        })
                return processes
        except Exception as e:
            logging.error(f"Failed to get processes for {container.name}: {e}")
        return []
    
    async def check_suspicious_processes(self, container, processes: List[Dict]):
        """Check for suspicious processes"""
        for process in processes:
            command = process['command'].lower()
            for suspicious_cmd in self.security_rules["suspicious_processes"]:
                if suspicious_cmd in command:
                    await self.create_security_alert({
                        'type': 'suspicious_process',
                        'severity': 'medium',
                        'container': container.name,
                        'process': process,
                        'description': f"Suspicious process detected: {suspicious_cmd}"
                    })
    
    async def get_network_connections(self, container) -> List[Dict]:
        """Get network connections from container"""
        try:
            # -a includes established sockets; -tuln alone lists only listeners
            result = container.exec_run("netstat -tuan", demux=True)
            if result.exit_code == 0 and result.output[0]:
                lines = result.output[0].decode().strip().split('\n')
                connections = []
                for line in lines:
                    if 'LISTEN' in line or 'ESTABLISHED' in line:
                        parts = line.split()
                        if len(parts) >= 4:
                            connections.append({
                                'protocol': parts[0],
                                'local_address': parts[3],
                                'state': parts[5] if len(parts) > 5 else 'UNKNOWN'
                            })
                return connections
        except Exception as e:
            logging.error(f"Failed to get connections for {container.name}: {e}")
        return []
    
    async def check_suspicious_network(self, container, connections: List[Dict]):
        """Check for suspicious network activity"""
        for conn in connections:
            local_addr = conn['local_address']
            if ':' in local_addr and local_addr.split(':')[-1].isdigit():
                port = int(local_addr.split(':')[-1])
                for rule in self.security_rules["suspicious_network"]:
                    if port == rule['port']:
                        await self.create_security_alert({
                            'type': 'suspicious_network',
                            'severity': 'high',
                            'container': container.name,
                            'connection': conn,
                            'description': f"Suspicious network activity on port {port}"
                        })
    
    async def check_resource_anomalies(self, container, stats: Dict):
        """Check for resource usage anomalies"""
        try:
            # Calculate CPU percentage, scaled by the number of online CPUs
            cpu_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - \
                       stats['precpu_stats']['cpu_usage']['total_usage']
            system_delta = stats['cpu_stats']['system_cpu_usage'] - \
                          stats['precpu_stats']['system_cpu_usage']
            num_cpus = stats['cpu_stats'].get('online_cpus', 1)
            # Guard against a zero interval on the first stats sample
            cpu_percent = (cpu_delta / system_delta) * num_cpus * 100.0 \
                          if system_delta > 0 else 0.0
            
            # Calculate memory percentage
            memory_usage = stats['memory_stats']['usage']
            memory_limit = stats['memory_stats']['limit']
            memory_percent = (memory_usage / memory_limit) * 100.0
            
            # Check thresholds
            thresholds = self.security_rules["resource_thresholds"]
            
            if cpu_percent > thresholds["cpu_percent"]:
                await self.create_security_alert({
                    'type': 'resource_anomaly',
                    'severity': 'medium',
                    'container': container.name,
                    'metric': 'cpu',
                    'value': cpu_percent,
                    'threshold': thresholds["cpu_percent"],
                    'description': f"High CPU usage: {cpu_percent:.2f}%"
                })
            
            if memory_percent > thresholds["memory_percent"]:
                await self.create_security_alert({
                    'type': 'resource_anomaly',
                    'severity': 'high',
                    'container': container.name,
                    'metric': 'memory',
                    'value': memory_percent,
                    'threshold': thresholds["memory_percent"],
                    'description': f"High memory usage: {memory_percent:.2f}%"
                })
                
        except Exception as e:
            logging.error(f"Error checking resource anomalies: {e}")
    
    async def check_file_integrity(self, container):
        """Check file integrity for critical files"""
        for file_path in self.security_rules["file_integrity"]:
            try:
                # Get file hash
                result = container.exec_run(f"sha256sum {file_path}", demux=True)
                if result.exit_code == 0:
                    current_hash = result.output[0].decode().split()[0]
                    
                    # Check against stored hash
                    stored_hash = await self.redis.get(f"hash:{container.name}:{file_path}")
                    
                    if stored_hash:
                        if current_hash != stored_hash.decode():
                            await self.create_security_alert({
                                'type': 'file_integrity',
                                'severity': 'critical',
                                'container': container.name,
                                'file': file_path,
                                'description': f"File integrity violation: {file_path}"
                            })
                    else:
                        # Store initial hash
                        await self.redis.set(f"hash:{container.name}:{file_path}", current_hash)
                        
            except Exception as e:
                logging.debug(f"File integrity check failed for {file_path}: {e}")
    
    async def create_security_alert(self, alert: Dict):
        """Create and distribute security alert"""
        alert['timestamp'] = datetime.now().isoformat()
        alert['id'] = f"alert_{int(datetime.now().timestamp() * 1000)}"  # ms precision to reduce ID collisions
        
        # Store in Redis
        await self.redis.lpush("security_alerts", json.dumps(alert))
        await self.redis.ltrim("security_alerts", 0, 999)  # Keep last 1000 alerts
        
        # Log alert
        logging.warning(f"Security Alert: {alert}")
        
        # Send to WebSocket clients
        await self.broadcast_alert(alert)
        
        # Trigger automated response if critical
        if alert['severity'] == 'critical':
            await self.trigger_incident_response(alert)
    
    async def broadcast_alert(self, alert: Dict):
        """Broadcast alert to WebSocket clients"""
        if self.websocket_clients:
            message = json.dumps(alert)
            await asyncio.gather(
                *[client.send(message) for client in self.websocket_clients],
                return_exceptions=True
            )
    
    async def trigger_incident_response(self, alert: Dict):
        """Trigger automated incident response"""
        container_name = alert.get('container')
        if container_name:
            try:
                container = self.docker_client.containers.get(container_name)
                
                # Isolate container (disconnect from networks except monitoring)
                networks = container.attrs['NetworkSettings']['Networks']
                for network_name in networks:
                    if network_name != 'monitoring':
                        network = self.docker_client.networks.get(network_name)
                        network.disconnect(container)
                
                logging.critical(f"Container {container_name} isolated due to critical alert")
                
            except Exception as e:
                logging.error(f"Failed to isolate container {container_name}: {e}")
    
    async def websocket_handler(self, websocket, path=None):
        """Handle WebSocket connections (path arg is optional for websockets >= 11 compatibility)"""
        self.websocket_clients.add(websocket)
        try:
            # Send recent alerts
            recent_alerts = await self.redis.lrange("security_alerts", 0, 49)
            for alert_json in recent_alerts:
                await websocket.send(alert_json.decode())
            
            # Keep connection alive
            await websocket.wait_closed()
        finally:
            self.websocket_clients.remove(websocket)
    
    async def run(self):
        """Run the SOC monitor"""
        await self.initialize()
        
        # Start monitoring tasks
        monitor_task = asyncio.create_task(self.monitor_containers())
        
        # Start WebSocket server
        websocket_server = await websockets.serve(
            self.websocket_handler, "localhost", 8765
        )
        
        logging.info("SOC Monitor started")
        
        try:
            await asyncio.gather(monitor_task)
        except KeyboardInterrupt:
            logging.info("SOC Monitor stopped")
        finally:
            websocket_server.close()
            await websocket_server.wait_closed()

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    monitor = SOCMonitor()
    asyncio.run(monitor.run())
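
A minimal way to exercise the monitor locally, assuming Docker is running and that redis-py >= 4.2 supplies the redis.asyncio client imported above:

#!/usr/bin/env bash
# run-soc-monitor.sh -- local smoke test, names are illustrative
set -euo pipefail

# Redis backs alert storage and the file-integrity baselines
docker run -d --name soc-redis -p 6379:6379 redis:7-alpine

# Dependencies used by the script
pip install docker redis websockets

# Start the monitor; the alert feed is served on ws://localhost:8765
python3 soc-monitor.py &

# Attach the interactive client bundled with the websockets package
python3 -m websockets ws://localhost:8765/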

Performance Optimization Case Studies

E-Commerce Platform Optimization

# docker-compose.ecommerce-optimized.yml
version: '3.8'

services:
  # Load Balancer with Connection Pooling
  haproxy:
    image: haproxy:2.6
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
    networks:
      - frontend
    sysctls:
      - net.core.somaxconn=65535
      - net.ipv4.tcp_max_syn_backlog=65535
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '2.0'

  # Web Frontend with Caching
  web:
    build: ./web-optimized
    networks:
      - frontend
      - backend
    volumes:
      - web-cache:/var/cache/nginx
      - type: tmpfs
        target: /tmp
        tmpfs:
          size: 512M
    environment:
      - NGINX_WORKER_PROCESSES=auto
      - NGINX_WORKER_CONNECTIONS=4096
    sysctls:
      - net.core.rmem_max=134217728
      - net.core.wmem_max=134217728
    deploy:
      replicas: 4
      resources:
        limits:
          memory: 512M
          cpus: '1.0'

  # Application Server with JVM Tuning
  app:
    build: ./app-optimized
    networks:
      - backend
    environment:
      - JAVA_OPTS=-Xms2g -Xmx4g -XX:+UseG1GC -XX:MaxGCPauseMillis=200
      # UseCGroupMemoryLimitForHeap was removed in JDK 10; container-aware
      # limits (UseContainerSupport) are on by default since JDK 8u191/10
      - JVM_OPTS=-XX:+UseContainerSupport
    volumes:
      - app-logs:/app/logs
    deploy:
      replicas: 6
      resources:
        limits:
          memory: 6G
          cpus: '2.0'
        reservations:
          memory: 4G
          cpus: '1.0'

  # Database with Performance Tuning
  postgres:
    image: postgres:14
    networks:
      - backend
    volumes:
      - postgres-data:/var/lib/postgresql/data
      - postgres-wal:/var/lib/postgresql/wal
      - type: tmpfs
        target: /tmp
        tmpfs:
          size: 2G
    environment:
      - POSTGRES_INITDB_WALDIR=/var/lib/postgresql/wal
    command: |
      postgres
      -c max_connections=200
      -c shared_buffers=2GB
      -c effective_cache_size=6GB
      -c maintenance_work_mem=512MB
      -c checkpoint_completion_target=0.9
      -c wal_buffers=16MB
      -c default_statistics_target=100
      -c random_page_cost=1.1
      -c effective_io_concurrency=200
      -c work_mem=8MB
      -c min_wal_size=2GB
      -c max_wal_size=8GB
    deploy:
      resources:
        limits:
          memory: 8G
          cpus: '4.0'

  # Redis Cluster for Caching
  redis:
    image: redis:7-alpine
    networks:
      - backend
    volumes:
      - redis-data:/data
    command: |
      redis-server
      --maxmemory 4gb
      --maxmemory-policy allkeys-lru
      --save 900 1
      --save 300 10
      --save 60 10000
      --tcp-backlog 511
      --tcp-keepalive 300
    sysctls:
      - net.core.somaxconn=65535
    deploy:
      resources:
        limits:
          memory: 6G
          cpus: '2.0'

networks:
  frontend:
    driver: bridge
    driver_opts:
      com.docker.network.bridge.name: frontend-br
      com.docker.network.mtu: 1500
  backend:
    driver: bridge
    internal: true
    driver_opts:
      com.docker.network.bridge.name: backend-br

volumes:
  web-cache:
  app-logs:
  postgres-data:
    driver: local
    driver_opts:
      type: ext4
      device: /dev/nvme1n1  # the local driver needs a real block device; adjust to the host
      o: noatime,nodiratime
  postgres-wal:
    driver: local
    driver_opts:
      type: ext4
      device: /dev/nvme2n1  # ideally a separate device so WAL writes do not contend with data
      o: noatime  # PostgreSQL fsyncs WAL itself; a "sync" mount only adds overhead
  redis-data:
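
Once the stack is up, it pays to verify that the tuning knobs actually took effect; a short smoke-test sketch, assuming a load generator such as wrk is installed on the host:

#!/usr/bin/env bash
# ecommerce-smoke.sh -- confirm the tuning survived deployment
set -euo pipefail

# Per-service sysctls are namespaced, so read them inside the containers
docker compose exec haproxy cat /proc/sys/net/core/somaxconn   # expect 65535
docker compose exec redis cat /proc/sys/net/core/somaxconn     # expect 65535

# Confirm PostgreSQL picked up the command-line overrides
docker compose exec postgres psql -U postgres -c "SHOW shared_buffers;"

# Baseline load test against the front door
wrk -t8 -c256 -d30s http://localhost/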

Summary

This section demonstrated practical security and optimization applications:

Enterprise Security

  • Financial Services: Comprehensive security stack with WAF, HSM, and compliance scanning
  • Real-Time Monitoring: SOC implementation with automated threat detection and response
  • Zero-Trust Architecture: Policy enforcement and container isolation

Performance Optimization

  • HPC: GPU-accelerated computing with MPI coordination and high-performance storage
  • E-Commerce Platform: Multi-tier optimization with caching, connection pooling, and database tuning
  • Resource Management: Advanced CPU, memory, and network optimization

Monitoring and Response

  • Security Operations: Automated threat detection with process, network, and file integrity monitoring
  • Performance Analytics: Comprehensive metrics collection and anomaly detection
  • Incident Response: Automated container isolation and alert distribution

Key Patterns Applied

  • Defense in Depth: Multiple security layers from network to application level
  • Performance Tuning: Systematic optimization across all infrastructure components
  • Automation: Automated monitoring, alerting, and response capabilities
  • Compliance: Continuous compliance monitoring and reporting

Next Steps: Part 4 covers advanced techniques including custom security plugins, performance profiling tools, and enterprise-grade monitoring solutions.