Memory Ordering and Atomicity

Go’s memory model also defines guarantees around memory ordering in concurrent programs:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// racyCount is incremented with no synchronization at all — this is
	// an intentional data race, shown for contrast.
	var racyCount int64

	// atomicCount is incremented via sync/atomic primitives.
	var atomicCount int64

	// lockedCount is guarded by a mutex.
	var lockedCount int64
	var mu sync.Mutex

	const perGoroutine = 1000
	var wg sync.WaitGroup

	// Spin up 10 goroutines for each counting strategy.
	for g := 0; g < 10; g++ {
		wg.Add(3)

		// Racy increments: the final value is unpredictable.
		go func() {
			defer wg.Done()
			for n := 0; n < perGoroutine; n++ {
				racyCount++ // Data race!
			}
		}()

		// Atomic increments: lock-free and thread-safe.
		go func() {
			defer wg.Done()
			for n := 0; n < perGoroutine; n++ {
				atomic.AddInt64(&atomicCount, 1)
			}
		}()

		// Mutex-guarded increments: thread-safe, with lock overhead.
		go func() {
			defer wg.Done()
			for n := 0; n < perGoroutine; n++ {
				mu.Lock()
				lockedCount++
				mu.Unlock()
			}
		}()
	}

	wg.Wait()

	fmt.Printf("Unsafe counter (data race): %d\n", racyCount)
	fmt.Printf("Atomic counter: %d\n", atomicCount)
	fmt.Printf("Mutex counter: %d\n", lockedCount)
}

This example illustrates different approaches to handling shared memory in concurrent programs, highlighting the importance of proper synchronization to avoid data races.


Advanced Memory Management Techniques

Now that we understand the fundamentals, let’s explore advanced techniques for managing memory in Go applications.

Custom Memory Pools

For applications that frequently allocate and deallocate objects of the same size, custom memory pools can significantly reduce GC pressure:

package main

import (
	"fmt"
	"sync"
	"time"
)

// Buffer is a fixed-size 1 KiB scratch object used to demonstrate
// pooling. Its single flat byte array makes every instance the same
// size, which is the case where object pooling pays off most.
type Buffer struct {
	data [1024]byte
}

// BufferPool wraps a sync.Pool to hand out reusable *Buffer values,
// reducing allocation (and therefore GC) pressure when Buffers are
// created and discarded frequently.
type BufferPool struct {
	pool sync.Pool
}

// NewBufferPool constructs a BufferPool whose underlying sync.Pool
// allocates a fresh Buffer whenever no pooled one is available.
func NewBufferPool() *BufferPool {
	p := &BufferPool{}
	p.pool.New = func() interface{} {
		return new(Buffer)
	}
	return p
}

// Get borrows a *Buffer from the pool, allocating a new one via the
// pool's New function when none is available for reuse.
func (p *BufferPool) Get() *Buffer {
	item := p.pool.Get()
	return item.(*Buffer)
}

// Put hands a buffer back to the pool so a later Get can reuse it.
// The caller must not touch the buffer afterwards; note its contents
// are not cleared, so the next Get may observe stale data.
func (p *BufferPool) Put(buffer *Buffer) {
	p.pool.Put(buffer)
}

func main() {
	bufferPool := NewBufferPool()

	const rounds = 1000000

	// timeLoop runs body `rounds` times and reports elapsed wall time.
	timeLoop := func(body func(i int)) time.Duration {
		start := time.Now()
		for i := 0; i < rounds; i++ {
			body(i)
		}
		return time.Since(start)
	}

	// Baseline: allocate a fresh Buffer each iteration and let the
	// garbage collector reclaim it.
	withoutPoolDuration := timeLoop(func(i int) {
		buffer := &Buffer{}
		buffer.data[0] = byte(i)
	})

	// Pooled: borrow a Buffer, touch it, and hand it straight back.
	withPoolDuration := timeLoop(func(i int) {
		buffer := bufferPool.Get()
		buffer.data[0] = byte(i)
		bufferPool.Put(buffer)
	})

	fmt.Printf("Without pool: %v\n", withoutPoolDuration)
	fmt.Printf("With pool: %v\n", withPoolDuration)
	fmt.Printf("Performance improvement: %.2fx\n",
		float64(withoutPoolDuration)/float64(withPoolDuration))
}