mtlog

Quick Reference

A quick reference for all mtlog features and common usage patterns.

Basic Setup

import (
    "github.com/willibrandon/mtlog"
    "github.com/willibrandon/mtlog/core"
)

// Simple logger
logger := mtlog.New(mtlog.WithConsole())

// Production logger
logger := mtlog.New(
    mtlog.WithConsoleTheme("dark"),
    mtlog.WithSeq("http://localhost:5341", "api-key"),
    mtlog.WithMinimumLevel(core.InformationLevel),
)

Logging Methods

Traditional Methods

logger.Verbose("Verbose message")
logger.Debug("Debug: {Value}", value)
logger.Information("Info: {User} {Action}", user, action)
logger.Warning("Warning: {Count} items", count)
logger.Error("Error: {Error}", err)
logger.Fatal("Fatal: {Reason}", reason)

Generic Methods (Type-Safe)

logger.VerboseT("Verbose message")
logger.DebugT("Debug: {Value}", value)
logger.InformationT("Info: {User} {Action}", user, action)
logger.WarningT("Warning: {Count} items", count)
logger.ErrorT("Error: {Error}", err)
logger.FatalT("Fatal: {Reason}", reason)

Short Methods

logger.V("Verbose")
logger.D("Debug: {Value}", value)
logger.I("Info: {Message}", msg)
logger.W("Warning: {Issue}", issue)
logger.E("Error: {Error}", err)
logger.F("Fatal: {Reason}", reason)

Message Templates

Template Syntaxes

// Traditional syntax
log.Information("User {UserId} logged in from {IP}", userId, ipAddress)

// Go template syntax
log.Information("User {{.UserId}} logged in from {{.IP}}", userId, ipAddress)

// Mixed syntax
log.Information("User {UserId} ({{.Username}}) from {IP}", userId, username, ipAddress)

Capturing Hints

// @ - capture complex types
log.Information("Order {@Order} created", order)

// $ - force scalar rendering
log.Information("Error occurred: {$Error}", err)

Format Specifiers

// Numbers
log.Information("Count: {Count:000}", 42)        // 042
log.Information("Price: ${Price:F2}", 123.456)   // $123.46
log.Information("Usage: {Percent:P1}", 0.85)     // 85.0%

// Custom alignment
log.Information("Status: {Status,10}", "OK")     // "OK        "
log.Information("Code: {Code,-5}", "ABC")        // "ABC  "

Output Templates

Console Templates

// Simple
mtlog.WithConsoleTemplate("[${Level:u3}] ${Message}")

// With timestamp
mtlog.WithConsoleTemplate("[${Timestamp:HH:mm:ss} ${Level:u3}] ${Message}")

// Full details
mtlog.WithConsoleTemplate(
    "[${Timestamp:yyyy-MM-dd HH:mm:ss} ${Level:u3}] {SourceContext}: ${Message}${NewLine}${Exception}")

File Templates

mtlog.WithFileTemplate("app.log", 
    "[${Timestamp:o} ${Level:u3}] {SourceContext} ${Message}${NewLine}${Exception}")

Enrichers

// Built-in enrichers
logger := mtlog.New(
    mtlog.WithTimestamp(),
    mtlog.WithMachineName(),
    mtlog.WithProcessInfo(),
    mtlog.WithEnvironmentVariables("APP_ENV", "VERSION"),
    mtlog.WithThreadId(),
    mtlog.WithCallersInfo(),
    mtlog.WithSourceContext(),
)

// Custom enricher
type UserEnricher struct{ userID int }

func (e *UserEnricher) Enrich(event *core.LogEvent, factory core.LogEventPropertyFactory) {
    event.AddPropertyIfAbsent(factory.CreateProperty("UserId", e.userID))
}

logger := mtlog.New(
    mtlog.WithEnricher(&UserEnricher{userID: 123}),
)

Filters

// Level filtering
mtlog.WithMinimumLevel(core.WarningLevel)

// Level overrides by source
mtlog.WithMinimumLevelOverrides(map[string]core.LogEventLevel{
    "myapp/services": core.DebugLevel,
    "github.com/gin-gonic/gin": core.WarningLevel,
})

// Custom filter
mtlog.WithFilter(filters.NewPredicateFilter(func(e *core.LogEvent) bool {
    return !strings.Contains(e.MessageTemplate.Text, "health-check")
}))

// Rate limiting
mtlog.WithFilter(filters.NewRateLimitFilter(100, time.Minute))

// Sampling
mtlog.WithFilter(filters.NewSamplingFilter(0.1)) // 10% of events

Sinks

Console

// Themes
mtlog.WithConsoleTheme(sinks.LiterateTheme())
mtlog.WithConsoleTheme(sinks.DarkTheme())
mtlog.WithConsoleTheme(sinks.LightTheme())
mtlog.WithConsoleTheme(sinks.NoColorTheme())

File

// Simple file
mtlog.WithFileSink("app.log")

// Rolling by size
mtlog.WithRollingFile("app.log", 10*1024*1024) // 10MB

// Rolling by time
mtlog.WithRollingFileTime("app.log", time.Hour)

Seq

// Basic
mtlog.WithSeq("http://localhost:5341")

// With API key
mtlog.WithSeq("http://localhost:5341", "your-api-key")

// Dynamic level control
levelOption, levelSwitch, controller := mtlog.WithSeqLevelControl(
    "http://localhost:5341",
    mtlog.SeqLevelControllerOptions{
        CheckInterval: 30*time.Second,
    },
)

Elasticsearch

// Basic
mtlog.WithElasticsearch("http://localhost:9200", "logs")

// With multiple nodes and API key
mtlog.WithElasticsearchAdvanced(
    []string{"http://node1:9200", "http://node2:9200"},
    sinks.WithElasticsearchIndex("logs-%{+yyyy.MM.dd}"),
    sinks.WithElasticsearchAPIKey("your-api-key"),
    sinks.WithElasticsearchBatchSize(100),
)

Splunk

// Basic HEC integration
mtlog.WithSplunk("http://localhost:8088", "hec-token")

// Advanced configuration
mtlog.WithSplunkAdvanced("http://localhost:8088",
    sinks.WithSplunkToken("your-hec-token"),
    sinks.WithSplunkIndex("main"),
    sinks.WithSplunkSource("mtlog"),
    sinks.WithSplunkSourceType("_json"),
)

Sentry

import "github.com/willibrandon/mtlog/adapters/sentry"

// Basic error tracking
sink, _ := sentry.WithSentry("https://key@sentry.io/project")
log := mtlog.New(mtlog.WithSink(sink))

// With sampling for high-volume applications
sink, _ := sentry.WithSentry("https://key@sentry.io/project",
    sentry.WithFixedSampling(0.1),  // 10% sampling
)

// Advanced configuration with performance monitoring
sink, _ := sentry.WithSentry("https://key@sentry.io/project",
    sentry.WithEnvironment("production"),
    sentry.WithRelease("v1.2.3"),
    sentry.WithTracesSampleRate(0.2),
    sentry.WithProfilesSampleRate(0.1),
    sentry.WithAdaptiveSampling(0.01, 0.5),  // 1% to 50% adaptive
    sentry.WithRetryPolicy(3, time.Second),
    sentry.WithStackTraceCache(1000),
)

OpenTelemetry (OTLP)

import "github.com/willibrandon/mtlog/adapters/otel"

// Basic OTLP sink
logger := otel.NewOTELLogger(
    otel.WithOTLPEndpoint("localhost:4317"),
    otel.WithOTLPInsecure(),
)

// With batching and compression
logger := mtlog.New(
    otel.WithOTLPSink(
        otel.WithOTLPEndpoint("otel-collector:4317"),
        otel.WithOTLPBatching(100, 5*time.Second),
        otel.WithOTLPCompression("gzip"),
    ),
)

// With trace context enrichment
logger := otel.NewRequestLogger(ctx,
    otel.WithOTLPEndpoint("localhost:4317"),
    otel.WithOTLPInsecure(),
)

// With sampling strategies
logger := mtlog.New(
    otel.WithOTLPSink(
        otel.WithOTLPEndpoint("localhost:4317"),
        otel.WithOTLPSampling(otel.NewRateSampler(0.1)), // 10% sampling
    ),
)

Async & Durable

// Async wrapper
mtlog.WithAsync(mtlog.WithFileSink("app.log"))

// Durable buffering
mtlog.WithDurable(
    mtlog.WithSeq("http://localhost:5341"),
    sinks.WithDurableDirectory("./logs/buffer"),
    sinks.WithDurableMaxSize(100*1024*1024),
)

Event Routing

// Conditional sink - zero overhead for non-matching events
alertSink := sinks.NewConditionalSink(
    func(e *core.LogEvent) bool { 
        return e.Level >= core.ErrorLevel && e.Properties["Alert"] != nil 
    },
    sinks.NewFileSink("alerts.log"),
)

// Built-in predicates
sinks.LevelPredicate(core.ErrorLevel)                    // Level filtering
sinks.PropertyPredicate("Audit")                         // Property exists
sinks.PropertyValuePredicate("Environment", "production") // Property value
sinks.AndPredicate(pred1, pred2)                         // All must match
sinks.OrPredicate(pred1, pred2)                          // Any matches
sinks.NotPredicate(pred)                                 // Invert predicate
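
As a sketch, the predicates above can be combined with a conditional sink; this assumes auditSink already exists and that the predicate helpers return the same predicate type NewConditionalSink accepts:

// Route production audit events to a dedicated sink
auditToFile := sinks.NewConditionalSink(
    sinks.AndPredicate(
        sinks.PropertyPredicate("Audit"),
        sinks.PropertyValuePredicate("Environment", "production"),
    ),
    auditSink,
)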

// Router sink - FirstMatch mode (exclusive routing)
router := sinks.NewRouterSink(sinks.FirstMatch,
    sinks.ErrorRoute("errors", errorSink),
    sinks.AuditRoute("audit", auditSink),
)

// Router sink - AllMatch mode (broadcast routing)  
router := sinks.NewRouterSink(sinks.AllMatch,
    sinks.MetricRoute("metrics", metricsSink),
    sinks.AuditRoute("audit", auditSink),
)

// Dynamic route management
router.AddRoute(sinks.Route{
    Name:      "debug",
    Predicate: func(e *core.LogEvent) bool { return e.Level <= core.DebugLevel },
    Sink:      debugSink,
})
router.RemoveRoute("debug")

// Fluent route builder
route := sinks.NewRoute("special").
    When(func(e *core.LogEvent) bool { return e.Properties["Special"] != nil }).
    To(specialSink)

ForType - Type-Based Logging

// Automatic SourceContext from types
userLogger := mtlog.ForType[User](logger)
userLogger.Information("User created") // SourceContext: "User"

// Service pattern
type UserService struct {
    logger core.Logger
}

func NewUserService(base core.Logger) *UserService {
    return &UserService{
        logger: mtlog.ForType[UserService](base),
    }
}

// Advanced options
opts := mtlog.TypeNameOptions{
    IncludePackage: true,
    Prefix: "MyApp.",
}
name := mtlog.ExtractTypeName[User](opts) // "MyApp.mypackage.User"

Per-Message Sampling

Basic Sampling

// Sample every Nth message
sampledLogger := logger.Sample(10)  // Every 10th message

// Time-based sampling
sampledLogger := logger.SampleDuration(time.Second)  // At most once per second

// Rate-based sampling (percentage)
sampledLogger := logger.SampleRate(0.1)  // 10% of messages

// First N occurrences
sampledLogger := logger.SampleFirst(100)  // First 100 messages only

Advanced Sampling

// Group sampling - share counter across loggers
dbLogger := logger.SampleGroup("database", 10)
cacheLogger := logger.SampleGroup("database", 10)  // Same counter

// Conditional sampling
var highLoad atomic.Bool
sampledLogger := logger.SampleWhen(func() bool {
    return highLoad.Load()
}, 5)  // Every 5th when condition true

// Exponential backoff
errorLogger := logger.SampleBackoff("connection-error", 2.0)
// Logs at: 1st, 2nd, 4th, 8th, 16th, 32nd...

Configuration

// Default sampling for all messages
logger := mtlog.New(
    mtlog.WithConsole(),
    mtlog.WithDefaultSampling(100),  // Every 100th by default
)

// Reset sampling counters
sampledLogger.ResetSampling()
logger.ResetSamplingGroup("database")

// Sampling statistics
sampledLogger.EnableSamplingSummary(5 * time.Minute)
sampled, skipped := sampledLogger.GetSamplingStats()

// Cache warmup (at startup)
mtlog.WarmupSamplingGroups([]string{"database", "api"})
mtlog.WarmupSamplingBackoff([]string{"error", "timeout"}, 2.0)

Advanced Sampling Configuration

Predefined Sampling Profiles

Ready-to-use sampling profiles for common production scenarios:

// High-traffic API endpoints (1% sampling)
apiLogger := logger.SampleProfile("HighTrafficAPI")

// Background workers (10% sampling)
workerLogger := logger.SampleProfile("BackgroundWorker")

// Error logging with exponential backoff
errorLogger := logger.SampleProfile("ErrorReporting")

// Debug mode with higher sampling (25%)
debugLogger := logger.SampleProfile("DebugVerbose")

// Interactive user actions (50% sampling)
userLogger := logger.SampleProfile("UserInteractive")

// Database operations (every 5th message)
dbLogger := logger.SampleProfile("DatabaseOps")

// Analytics events (5% sampling)
analyticsLogger := logger.SampleProfile("Analytics")

// System health monitoring (time-based, once per second)
healthLogger := logger.SampleProfile("SystemHealth")

Adaptive Sampling

Automatically adjusts sampling rates to maintain target throughput:

// Target 100 events per second - automatically adjusts sampling rate
adaptiveLogger := logger.SampleAdaptive(100)

// Advanced adaptive sampling with bounds
adaptiveLogger := logger.SampleAdaptiveWithOptions(
    250,                    // Target: 250 events/second
    0.01,                   // Minimum rate: 1%
    1.0,                    // Maximum rate: 100%
    30*time.Second,         // Check interval
)

// Advanced adaptive sampling with hysteresis for stability
hysteresisLogger := logger.SampleAdaptiveWithHysteresis(
    200,                    // Target: 200 events/second
    0.005,                  // Minimum rate: 0.5%
    0.8,                    // Maximum rate: 80%
    15*time.Second,         // Check interval
    0.15,                   // Hysteresis: 15% (prevents oscillation)
    0.7,                    // Aggressiveness: 70% (smoother adjustments)
)

// Ultimate adaptive sampling with dampening for extreme load
dampenedLogger := logger.SampleAdaptiveWithDampening(
    200,                    // Target: 200 events/second
    0.005,                  // Minimum rate: 0.5%
    0.8,                    // Maximum rate: 80%
    15*time.Second,         // Check interval
    0.15,                   // Hysteresis: 15% (prevents oscillation)
    0.7,                    // Aggressiveness: 70% (smoother adjustments)
    0.4,                    // Dampening: 40% (reduces oscillation)
)

// Simplified adaptive sampling with dampening presets
conservativeLogger := logger.SampleAdaptiveWithPreset(100, mtlog.DampeningConservative)
moderateLogger := logger.SampleAdaptiveWithPreset(100, mtlog.DampeningModerate)
aggressiveLogger := logger.SampleAdaptiveWithPreset(100, mtlog.DampeningAggressive)
ultraStableLogger := logger.SampleAdaptiveWithPreset(100, mtlog.DampeningUltraStable)
responsiveLogger := logger.SampleAdaptiveWithPreset(100, mtlog.DampeningResponsive)

// Custom rate limits with presets
customLogger := logger.SampleAdaptiveWithPresetCustom(150, mtlog.DampeningAggressive, 0.05, 0.8)

// Available Presets:
// - Conservative: Heavy dampening for stable production (3s intervals)
// - Moderate: Balanced for general production use (1s intervals)
// - Aggressive: Light dampening for dynamic environments (500ms intervals)
// - Ultra Stable: Maximum stability for critical systems (5s intervals)
// - Responsive: Minimal dampening for development (200ms intervals)

// Adaptive sampling automatically:
// - Measures actual event rate
// - Increases sampling when below target
// - Decreases sampling when above target
// - Uses hysteresis to prevent rate oscillation
// - Applies exponential smoothing for stability
// - Stays within configured min/max bounds

Custom Sampling Profiles

Create application-specific sampling profiles:

// Define custom profiles for your application
customProfiles := map[string]mtlog.SamplingProfile{
    "PaymentProcessing": {
        Description: "Critical payment operations - log all errors, sample others",
        Config: func() mtlog.Option {
            return mtlog.Sampling().
                When(func() bool { return getCurrentErrorRate() > 0.01 }, 1). // All errors
                Rate(0.1).                                                     // 10% normal ops
                CombineOR()
        },
    },
    "UserAnalytics": {
        Description: "User behavior tracking",
        Config: func() mtlog.Option {
            return mtlog.Sampling().
                First(1000).     // First 1000 events per user
                Rate(0.05).      // Then 5% sampling
                Build()
        },
    },
}

// Register and use custom profiles
mtlog.RegisterSamplingProfiles(customProfiles)

// Bulk register multiple profiles with error handling
if err := mtlog.RegisterCustomProfiles(customProfiles); err != nil {
    log.Fatal("Failed to register sampling profiles:", err)
}

// Freeze profile registry after registration (recommended for production)
mtlog.FreezeProfiles()

// Use custom profiles
paymentLogger := logger.SampleProfile("PaymentProcessing")

// Profile versioning for backward compatibility
mtlog.AddCustomProfileWithVersion("PaymentV2", "Enhanced payment processing", "2.0", false, "", 
    func() core.LogEventFilter { return mtlog.Sampling().Rate(0.05).Build() })

// Use specific version
legacyPayment := logger.SampleProfileWithVersion("PaymentV2", "1.0")
modernPayment := logger.SampleProfileWithVersion("PaymentV2", "2.0")

// Version management
versions := mtlog.GetProfileVersions("PaymentV2")
isDeprecated, replacement := mtlog.IsProfileDeprecated("PaymentV2")

// Profile version auto-migration
mtlog.SetMigrationPolicy(mtlog.MigrationPolicy{
    Consent:            mtlog.MigrationAuto,  // Auto-migrate without prompting
    PreferStable:       true,                // Skip deprecated versions
    MaxVersionDistance: 1,                   // Allow migration within 1 major version
})

// Request version that might not exist - auto-migrates to compatible version
profile, actualVersion, found := mtlog.GetProfileWithMigration("PaymentV2", "1.5")
migratedLogger := logger.SampleProfileWithVersion("PaymentV2", "1.3") // Auto-migrates if needed

// Migration Consent Modes:
// - MigrationDeny: Strict mode - fail if exact version not found
// - MigrationPrompt: Log warning and migrate to best available (default)
// - MigrationAuto: Silent automatic migration to compatible versions
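
For illustration, strict mode uses the same SetMigrationPolicy call shown above (a sketch; only the Consent field changes):

// Strict mode: requesting a missing profile version fails instead of migrating
mtlog.SetMigrationPolicy(mtlog.MigrationPolicy{
    Consent: mtlog.MigrationDeny,
})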

Fluent Sampling Configuration Builder

// Pipeline-style sampling (filters applied in sequence)
logger := mtlog.New(
    mtlog.WithConsole(),
    mtlog.Sampling().
        Every(10).       // First: sample every 10th message
        Rate(0.5).       // Then: 50% of those that pass
        First(100).      // Finally: only first 100 that make it through
        Build(),         // Apply as sequential pipeline
)

// Composite AND sampling (all conditions must match)
logger := mtlog.New(
    mtlog.WithConsole(),
    mtlog.Sampling().
        Every(2).        // Must be every 2nd message
        First(10).       // Must be within first 10 evaluations
        CombineAND(),    // Both conditions must be true
)

// Composite OR sampling (any condition can match)
logger := mtlog.New(
    mtlog.WithConsole(),
    mtlog.Sampling().
        Every(5).        // Either every 5th message
        First(3).        // Or first 3 messages
        CombineOR(),     // Either condition allows logging
)

Custom Sampling Policies

// Implement SamplingPolicy interface for complex logic
type UserBasedSamplingPolicy struct {
    adminRate   float32
    premiumRate float32  
    basicRate   float32
}

func (p *UserBasedSamplingPolicy) ShouldSample(event *core.LogEvent) bool {
    userTier, _ := event.Properties["UserTier"].(string)
    switch userTier {
    case "admin":   return true
    case "premium": return rand.Float32() < p.premiumRate
    case "basic":   return rand.Float32() < p.basicRate
    default:        return false
    }
}

// Use the custom policy
logger := mtlog.New(
    mtlog.WithConsole(),
    mtlog.WithSamplingPolicy(&UserBasedSamplingPolicy{
        adminRate: 1.0, premiumRate: 0.5, basicRate: 0.1,
    }),
)

Pipeline vs Composite Behavior

Key Differences:

  • Pipeline (Build()): Filters applied sequentially. Each filter only sees events that passed the previous filter
  • Composite (CombineAND/OR()): Each filter evaluates all events independently, results combined with logical AND/OR

// Pipeline: Every(2) → First(5) → Result
mtlog.Sampling().Every(2).First(5).Build()

// Composite: Every(2) AND First(5) → Result
mtlog.Sampling().Every(2).First(5).CombineAND()

Production Pattern

// Different sampling for different endpoints
healthLogger := logger.
    ForContext("Endpoint", "/health").
    SampleDuration(10 * time.Second)  // Once per 10 seconds

apiLogger := logger.
    ForContext("Endpoint", "/api/users").
    SampleRate(0.01)  // 1% sampling

errorLogger := logger.
    SampleBackoff("api-error", 2.0)  // Exponential backoff

Context Logging

With() Method - Structured Fields

// Basic usage with key-value pairs
logger.With("service", "api", "version", "1.0").Info("Service started")

// Chaining With() calls
logger.
    With("environment", "production").
    With("region", "us-west-2").
    Info("Deployment complete")

// Create a base logger with common fields
apiLogger := logger.With(
    "component", "api",
    "host", "api-server-01",
)

// Request-scoped logging
requestLogger := apiLogger.With(
    "request_id", "abc-123",
    "user_id", 456,
)
requestLogger.Info("Request started")
requestLogger.With("duration_ms", 42).Info("Request completed")

ForContext() Method

// Add single context property
contextLogger := logger.ForContext("RequestId", "abc-123")

// Multiple properties (variadic)
contextLogger := logger.ForContext("UserId", 123, "SessionId", "xyz")

// Source context for sub-loggers
serviceLogger := logger.ForSourceContext("MyApp.Services.UserService")

With() vs ForContext():

  • With(): Accepts variadic key-value pairs (slog-style), convenient for multiple fields
  • ForContext(): Takes a property name and value (or several pairs) and returns a new logger
  • Both create a new logger instance with the combined properties; the sketch below shows them side by side
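
A minimal sketch of the equivalence, using only calls shown above; the property name and value are illustrative:

// Both loggers carry UserId=123 on every event they emit
withLogger := logger.With("UserId", 123)
ctxLogger := logger.ForContext("UserId", 123)

withLogger.Information("Loaded profile")
ctxLogger.Information("Loaded profile")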

LogContext - Scoped Properties

// Add properties to context
ctx := context.Background()
ctx = mtlog.PushProperty(ctx, "RequestId", "req-123")
ctx = mtlog.PushProperty(ctx, "UserId", userId)

// Use with logger
log := logger.WithContext(ctx)
log.Information("Processing request") // Includes RequestId & UserId

// Property precedence
ctx = mtlog.PushProperty(ctx, "UserId", 123)
logger.WithContext(ctx).Information("Test")                          // UserId=123
logger.WithContext(ctx).ForContext("UserId", 456).Information("Test") // UserId=456
logger.WithContext(ctx).Information("User {UserId}", 789)            // UserId=789

Context Deadline Awareness

Context-Aware Methods

// All levels have context variants
logger.InfoContext(ctx, "Processing request")
logger.ErrorContext(ctx, "Operation failed: {Error}", err)
logger.DebugContext(ctx, "Cache hit for key {Key}", key)

// Context methods automatically:
// - Extract LogContext properties
// - Check for approaching deadlines
// - Include deadline properties when warning

Deadline Configuration

// Absolute threshold - warn when 100ms remains
logger := mtlog.New(
    mtlog.WithContextDeadlineWarning(100*time.Millisecond),
)

// Percentage threshold - warn when 20% remains
logger := mtlog.New(
    mtlog.WithDeadlinePercentageThreshold(
        10*time.Millisecond, // Min absolute
        0.2,                 // 20% threshold
    ),
)

Properties Added

// When approaching deadline
{
    "deadline.approaching": true,
    "deadline.remaining_ms": 95,
    "deadline.at": "2024-01-15T10:30:45Z",
    "deadline.first_warning": true
}

// When deadline exceeded
{
    "deadline.exceeded": true,
    "deadline.exceeded_by_ms": 150
}

Example Usage

ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()

logger.InfoContext(ctx, "Starting operation")
time.Sleep(350 * time.Millisecond)
logger.InfoContext(ctx, "Still processing...") // WARNING: Deadline approaching!

Dynamic Level Control

// Manual control
levelSwitch := mtlog.NewLoggingLevelSwitch(core.InformationLevel)
logger := mtlog.New(
    mtlog.WithLevelSwitch(levelSwitch),
    mtlog.WithConsole(),
)

// Change at runtime
levelSwitch.SetLevel(core.DebugLevel)

// Fluent interface
levelSwitch.Debug().Information().Warning()

// Check if enabled
if levelSwitch.IsEnabled(core.VerboseLevel) {
    // Expensive operation
}

HTTP Middleware

Basic Setup

import (
    "github.com/willibrandon/mtlog"
    "github.com/willibrandon/mtlog/adapters/middleware"
)

logger := mtlog.New(mtlog.WithConsole())

// net/http
mw := middleware.Middleware(middleware.DefaultOptions(logger))
handler := mw(yourHandler)

// Gin
router.Use(middleware.Gin(logger))

// Echo
e.Use(middleware.Echo(logger))

// Fiber
app.Use(middleware.Fiber(logger))

// Chi
r.Use(middleware.Chi(logger))

Configuration

options := &middleware.Options{
    Logger:            logger,
    GenerateRequestID: true,
    RequestIDHeader:   "X-Request-ID",
    SkipPaths:         []string{"/health", "/metrics"},
    RequestFields:     []string{"method", "path", "ip", "user_agent"},
    LatencyField:      "duration_ms",
    LatencyUnit:       "ms",
    
    // Body logging with sanitization
    LogRequestBody:   true,
    LogResponseBody:  true,
    MaxBodySize:      4096,
    BodySanitizer:    middleware.DefaultBodySanitizer,
    
    // Sampling strategies
    Sampler: middleware.NewPathSamplerBuilder().
        Never("/health").
        Sometimes("/api/status", 0.1).
        Always("*").
        Build(),
}

Request Logger Helper

func handler(w http.ResponseWriter, r *http.Request) {
    reqLogger := middleware.GetRequestLogger(r).
        WithUser("user-123").
        WithOperation("CreateOrder").
        WithResource("Order", "ord-456")
    
    reqLogger.Information("Processing order creation")
    
    if err := processOrder(); err != nil {
        reqLogger.WithError(err).Error("Order creation failed")
    }
}

Health Checks

// Basic health handler
healthHandler := middleware.NewHealthCheckHandler(logger).
    WithVersion("1.0.0").
    WithEnvironment("production").
    WithMetrics(true)

// Add custom checks
healthHandler.AddCheck("database", func() middleware.Check {
    if err := db.Ping(); err != nil {
        return middleware.Check{Status: "unhealthy", Error: err.Error()}
    }
    return middleware.Check{Status: "healthy"}
})

http.Handle("/health", healthHandler)

// Simple handlers
http.HandleFunc("/liveness", middleware.LivenessHandler())
http.HandleFunc("/readiness", middleware.ReadinessHandler(
    middleware.DatabaseHealthChecker("postgres", db.Ping),
))

📖 Complete Guide:

For detailed documentation, advanced features, and configuration options, see the HTTP Middleware Guide.

Ecosystem Compatibility

slog (Standard Library)

// Use mtlog as slog backend
slogger := mtlog.NewSlogLogger(
    mtlog.WithSeq("http://localhost:5341"),
    mtlog.WithMinimumLevel(core.InformationLevel),
)

// Set as default
slog.SetDefault(slogger)

// Use slog API
slog.Info("user logged in", "user_id", 123, "ip", "192.168.1.1")

logr (Kubernetes)

// Use mtlog as logr backend
import mtlogr "github.com/willibrandon/mtlog/adapters/logr"

logrLogger := mtlogr.NewLogger(
    mtlog.WithConsole(),
    mtlog.WithMinimumLevel(core.DebugLevel),
)

// Use logr API
logrLogger.Info("reconciling", "namespace", "default", "name", "my-app")
logrLogger.Error(err, "failed to update resource")

Static Analysis

mtlog-analyzer

Catch common mistakes at compile time with static analysis:

# Install
go install github.com/willibrandon/mtlog/cmd/mtlog-analyzer@latest

# Run with go vet
go vet -vettool=$(which mtlog-analyzer) ./...

Common Diagnostics

// MTLOG001: Template/argument mismatch
log.Info("User {Id} from {IP}", userId)  // ❌ Missing IP argument

// MTLOG003: Duplicate properties
log.Info("{Id} and {Id}", 1, 2)  // ❌ Duplicate 'Id'
log.With("id", 1, "id", 2)       // ❌ Duplicate key in With()

// MTLOG009: With() odd arguments
log.With("key1", "val1", "key2")  // ❌ Missing value

// MTLOG010: With() non-string key
log.With(123, "value")  // ❌ Key must be string

// MTLOG011: Cross-call duplicate
logger := log.With("service", "api")
logger.With("service", "auth")  // ⚠️ Overrides 'service'

// MTLOG013: Empty key
log.With("", "value")  // ❌ Empty key ignored

IDE Extensions

Real-time validation for mtlog message templates, powered by mtlog-analyzer.

Visual Studio Code

Get instant feedback on template/argument mismatches, property naming, and more.

VS Code Marketplace
code --install-extension mtlog.mtlog-analyzer

GoLand / IntelliJ IDEA

Real-time annotations with quick fixes for common issues.

JetBrains Marketplace
Settings → Plugins → Search "mtlog-analyzer"

Neovim

LSP integration with code actions, quick fixes, and advanced diagnostics.

Neovim Plugin
rtp = 'neovim-plugin'