diff --git a/.dockerignore b/.dockerignore
index c0742e4..db17af9 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -57,5 +57,8 @@ changes.txt
 .env
 .env.local
 
-# Data directories (MongoDB, Redis, etc.)
-data/
\ No newline at end of file
+# Data directory (MongoDB, Redis, genesis files)
+data/
+
+# Logs directory
+logs/
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 1f4126d..dbf2c2c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ aggregator.log
 /performance-test
 bft-config
 /data/
+/logs/
 .cache/
 
 # Environment files with secrets
diff --git a/Makefile b/Makefile
index e2243b9..e7c1b86 100644
--- a/Makefile
+++ b/Makefile
@@ -125,11 +125,21 @@ docker-restart-ha:
 	@echo "HA Aggregator services restarted"
 
+docker-run-sh-clean-keep-tb:
+	@echo "Rebuilding services with clean state but preserving BFT config, with sharding enabled as current user..."
+	@docker compose -f sharding-compose.yml down
+	@rm -rf ./data/mongodb_shard1_data ./data/mongodb_shard2_data ./data/mongodb_root_data ./data/redis_shared_data
+	@mkdir -p ./data/genesis/root ./data/genesis-root ./data/mongodb_shard1_data ./data/mongodb_shard2_data ./data/mongodb_root_data ./data/redis_shared_data && chmod -R 777 ./data
+	@mkdir -p ./logs/shard1 ./logs/shard2 ./logs/root && chmod -R 777 ./logs
+	@USER_UID=$$(id -u) USER_GID=$$(id -g) LOG_LEVEL=debug docker compose -f sharding-compose.yml up --force-recreate -d --build
+	@echo "Services rebuilt with user UID=$$(id -u):$$(id -g)"
+
 docker-run-sh-ha-clean-keep-tb:
 	@echo "Rebuilding services with clean state but preserving BFT config, with sharding and HA enabled as current user..."
 	@docker compose -f sharding-ha-compose.yml down
 	@rm -rf ./data/mongodb_shard1_data_1 ./data/mongodb_shard1_data_2 ./data/mongodb_shard1_data_3 ./data/mongodb_shard2_data_1 ./data/mongodb_shard2_data_2 ./data/mongodb_shard2_data_3 ./data/mongodb_root_data_1 ./data/mongodb_root_data_2 ./data/mongodb_root_data_3 ./data/redis_data
 	@mkdir -p ./data/genesis/root ./data/genesis-root ./data/mongodb_shard1_data_1 ./data/mongodb_shard1_data_2 ./data/mongodb_shard1_data_3 ./data/mongodb_shard2_data_1 ./data/mongodb_shard2_data_2 ./data/mongodb_shard2_data_3 ./data/mongodb_root_data_1 ./data/mongodb_root_data_2 ./data/mongodb_root_data_3 ./data/redis_data && chmod -R 777 ./data
+	@mkdir -p ./logs/shard1-1 ./logs/shard1-2 ./logs/shard2-1 ./logs/shard2-2 ./logs/root-1 && chmod -R 777 ./logs
 	@USER_UID=$$(id -u) USER_GID=$$(id -g) LOG_LEVEL=debug docker compose -f sharding-ha-compose.yml up --force-recreate -d --build
 	@echo "Services rebuilt with user UID=$$(id -u):$$(id -g)"
 
 
diff --git a/cmd/aggregator/main.go b/cmd/aggregator/main.go
index 0d85144..eec18da 100644
--- a/cmd/aggregator/main.go
+++ b/cmd/aggregator/main.go
@@ -39,12 +39,17 @@ func main() {
 	}
 
 	// Initialize logger
-	baseLogger, err := logger.New(
-		cfg.Logging.Level,
-		cfg.Logging.Format,
-		cfg.Logging.Output,
-		cfg.Logging.EnableJSON,
-	)
+	baseLogger, err := logger.NewWithConfig(logger.LogConfig{
+		Level:           cfg.Logging.Level,
+		Format:          cfg.Logging.Format,
+		Output:          cfg.Logging.Output,
+		EnableJSON:      cfg.Logging.EnableJSON,
+		FilePath:        cfg.Logging.FilePath,
+		MaxSizeMB:       cfg.Logging.MaxSizeMB,
+		MaxBackups:      cfg.Logging.MaxBackups,
+		MaxAgeDays:      cfg.Logging.MaxAgeDays,
+		CompressBackups: cfg.Logging.CompressBackups,
+	})
 	if err != nil {
 		fmt.Printf("Failed to initialize logger: %v\n", err)
 		os.Exit(1)
@@ -66,6 +71,16 @@ func main() {
 		log = baseLogger
 	}
 
+	// Log file logging configuration if enabled
+	if cfg.Logging.FilePath != "" {
+		log.WithComponent("main").Info("File logging enabled",
+			"filePath", cfg.Logging.FilePath,
+			"maxSizeMB", cfg.Logging.MaxSizeMB,
+			"maxBackups", cfg.Logging.MaxBackups,
+			"maxAgeDays", cfg.Logging.MaxAgeDays,
+			"compress", cfg.Logging.CompressBackups)
+	}
+
 	log.WithComponent("main").Info("Starting Unicity Aggregator")
 
 	// create global context
@@ -241,6 +256,11 @@ func main() {
 	if asyncLogger != nil {
 		asyncLogger.Stop()
 	}
+
+	// Close file logger if enabled
+	if err := baseLogger.Close(); err != nil {
+		fmt.Printf("Failed to close file logger: %v\n", err)
+	}
 }
 
 type leaderSelector interface {
diff --git a/go.mod b/go.mod
index 1167d4d..b40c50b 100644
--- a/go.mod
+++ b/go.mod
@@ -19,6 +19,7 @@ require (
 	go.opentelemetry.io/otel/metric v1.38.0
 	go.opentelemetry.io/otel/trace v1.38.0
 	golang.org/x/net v0.45.0
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )
 
 require (
diff --git a/go.sum b/go.sum
index 3da3e31..c6cd25c 100644
--- a/go.sum
+++ b/go.sum
@@ -852,6 +852,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/internal/config/config.go b/internal/config/config.go
index 1a3f9e8..1e51d36 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -85,6 +85,12 @@ type LoggingConfig struct {
 	EnableJSON      bool   `mapstructure:"enable_json"`
 	EnableAsync     bool   `mapstructure:"enable_async"`
 	AsyncBufferSize int    `mapstructure:"async_buffer_size"`
+	// File logging with rotation
+	FilePath        string `mapstructure:"file_path"`        // Path to log file (empty = no file logging)
+	MaxSizeMB       int    `mapstructure:"max_size_mb"`      // Max size in MB before rotation (default 100)
+	MaxBackups      int    `mapstructure:"max_backups"`      // Max number of old log files to retain (default 30)
+	MaxAgeDays      int    `mapstructure:"max_age_days"`     // Max days to retain old log files (default 30)
+	CompressBackups bool   `mapstructure:"compress_backups"` // Compress rotated log files (default true)
 }
 
 // ProcessingConfig holds batch processing configuration
@@ -283,6 +289,12 @@ func Load() (*Config, error) {
 			EnableJSON:      getEnvBoolOrDefault("LOG_ENABLE_JSON", true),
 			EnableAsync:     getEnvBoolOrDefault("LOG_ENABLE_ASYNC", true),
 			AsyncBufferSize: getEnvIntOrDefault("LOG_ASYNC_BUFFER_SIZE", 10000),
+			// File logging with rotation
+			FilePath:        getEnvOrDefault("LOG_FILE_PATH", ""),
+			MaxSizeMB:       getEnvIntOrDefault("LOG_MAX_SIZE_MB", 100),
+			MaxBackups:      getEnvIntOrDefault("LOG_MAX_BACKUPS", 30),
+			MaxAgeDays:      getEnvIntOrDefault("LOG_MAX_AGE_DAYS", 30),
+			CompressBackups: getEnvBoolOrDefault("LOG_COMPRESS_BACKUPS", true),
 		},
 		Processing: ProcessingConfig{
 			BatchLimit: getEnvIntOrDefault("BATCH_LIMIT", 1000),
diff --git a/internal/logger/logger.go b/internal/logger/logger.go
index ac5062f..74897c0 100644
--- a/internal/logger/logger.go
+++ b/internal/logger/logger.go
@@ -6,6 +6,8 @@ import (
 	"log/slog"
 	"os"
 	"strings"
+
+	"gopkg.in/natefinch/lumberjack.v2"
 )
 
 // ContextKey type for context keys
@@ -21,13 +23,37 @@ const (
 // Logger wraps slog.Logger with additional functionality
 type Logger struct {
 	*slog.Logger
+	fileWriter *lumberjack.Logger // concrete pointer (not io.Closer) so Close's nil check works; nil when file logging is disabled
+}
+
+// LogConfig holds the configuration for creating a logger
+type LogConfig struct {
+	Level           string
+	Format          string
+	Output          string
+	EnableJSON      bool
+	FilePath        string // Path to log file (empty = no file logging)
+	MaxSizeMB       int    // Max size in MB before rotation
+	MaxBackups      int    // Max number of old log files to retain
+	MaxAgeDays      int    // Max days to retain old log files
+	CompressBackups bool   // Compress rotated log files
 }
 
 // New creates a new logger instance
 func New(level, format, output string, enableJSON bool) (*Logger, error) {
+	return NewWithConfig(LogConfig{
+		Level:      level,
+		Format:     format,
+		Output:     output,
+		EnableJSON: enableJSON,
+	})
+}
+
+// NewWithConfig creates a new logger instance with full configuration including file rotation
+func NewWithConfig(cfg LogConfig) (*Logger, error) {
 	// Parse log level
 	var logLevel slog.Level
-	switch strings.ToLower(level) {
+	switch strings.ToLower(cfg.Level) {
 	case "debug":
 		logLevel = slog.LevelDebug
 	case "info":
@@ -40,19 +66,48 @@ func New(level, format, output string, enableJSON bool) (*Logger, error) {
 		logLevel = slog.LevelInfo
 	}
 
-	// Determine output writer
-	var writer io.Writer
-	switch output {
-	case "stdout", "":
-		writer = os.Stdout
+	// Determine console output writer
+	var consoleWriter io.Writer
+	switch cfg.Output {
 	case "stderr":
-		writer = os.Stderr
+		consoleWriter = os.Stderr
 	default:
-		file, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
-		if err != nil {
-			return nil, err
+		consoleWriter = os.Stdout
+	}
+
+	// Build the final writer (console only, or multi-writer with file)
+	var writer io.Writer
+	var fileWriter *lumberjack.Logger
+
+	if cfg.FilePath != "" {
+		// Set defaults if not provided
+		maxSize := cfg.MaxSizeMB
+		if maxSize <= 0 {
+			maxSize = 100 // 100 MB default
 		}
-		writer = file
+		maxBackups := cfg.MaxBackups
+		if maxBackups <= 0 {
+			maxBackups = 30
+		}
+		maxAge := cfg.MaxAgeDays
+		if maxAge <= 0 {
+			maxAge = 30
+		}
+
+		// Create lumberjack logger for file rotation
+		fileWriter = &lumberjack.Logger{
+			Filename:   cfg.FilePath,
+			MaxSize:    maxSize,    // megabytes
+			MaxBackups: maxBackups, // number of backups
+			MaxAge:     maxAge,     // days
+			Compress:   cfg.CompressBackups,
+			LocalTime:  true, // use local time for backup timestamps
+		}
+
+		// Combine console and file writers
+		writer = io.MultiWriter(consoleWriter, fileWriter)
+	} else {
+		writer = consoleWriter
 	}
 
 	// Create handler options
@@ -62,14 +117,22 @@ func New(level, format, output string, enableJSON bool) (*Logger, error) {
 
 	// Create handler based on format
 	var handler slog.Handler
-	if enableJSON || format == "json" {
+	if cfg.EnableJSON || cfg.Format == "json" {
 		handler = slog.NewJSONHandler(writer, opts)
 	} else {
 		handler = slog.NewTextHandler(writer, opts)
 	}
 
 	logger := slog.New(handler)
-	return &Logger{Logger: logger}, nil
+	return &Logger{Logger: logger, fileWriter: fileWriter}, nil
+}
+
+// Close closes any file writers (call on shutdown for clean log rotation)
+func (l *Logger) Close() error {
+	if l.fileWriter != nil {
+		return l.fileWriter.Close()
+	}
+	return nil
 }
 
 // WithContext creates a new logger with context values
diff --git a/sharding-compose.yml b/sharding-compose.yml
index 2a53efa..921b7bb 100644
--- a/sharding-compose.yml
+++ b/sharding-compose.yml
@@ -168,6 +168,7 @@ services:
       - default
     volumes:
       - ./data/genesis:/app/bft-config
+      - ./logs/shard1:/app/logs
       - ./certs:/app/certs:ro
     ulimits:
       nofile:
@@ -200,6 +201,7 @@ services:
       LOG_LEVEL: "debug"
       LOG_FORMAT: "json"
       LOG_ENABLE_JSON: "true"
+      LOG_FILE_PATH: "/app/logs/aggregator.log"
 
       # Processing Configuration
       BATCH_LIMIT: "1000"
@@ -273,6 +275,9 @@ services:
     container_name: aggregator-shard2
     ports:
       - "3002:3000"
+    volumes:
+      - ./data/genesis:/app/bft-config
+      - ./logs/shard2:/app/logs
     environment:
       <<: *environment-base
       MONGODB_URI: "mongodb://mongodb-shard2:27017/aggregator?replicaSet=rs-shard2&directConnection=true"
@@ -295,6 +300,9 @@ services:
     container_name: aggregator-root
     ports:
       - "3009:3000"
+    volumes:
+      - ./data/genesis:/app/bft-config
+      - ./logs/root:/app/logs
     environment:
       <<: *environment-base
       MONGODB_URI: "mongodb://mongodb-root:27017/aggregator?replicaSet=rs-root&directConnection=true"
diff --git a/sharding-ha-compose.yml b/sharding-ha-compose.yml
index 6c52671..93c6f24 100644
--- a/sharding-ha-compose.yml
+++ b/sharding-ha-compose.yml
@@ -287,6 +287,7 @@ services:
       - default
     volumes:
       - ./data/genesis:/app/bft-config
+      - ./logs/shard1-1:/app/logs
     depends_on:
      - mongo-setup-shard1
      - bft-aggregator-genesis-gen
@@ -308,6 +309,7 @@ services:
       LOG_LEVEL: "debug"
       LOG_FORMAT: "json"
       LOG_ENABLE_JSON: "true"
+      LOG_FILE_PATH: "/app/logs/aggregator.log"
       BATCH_LIMIT: "1000"
       COLLECT_PHASE_DURATION: "500ms"
       BFT_ENABLED: "false"
@@ -361,6 +363,9 @@ services:
     container_name: aggregator-shard1-2
     ports:
       - "3012:3000"
+    volumes:
+      - ./data/genesis:/app/bft-config
+      - ./logs/shard1-2:/app/logs
     environment:
       <<: *environment-base
       MONGODB_URI: "mongodb://mongodb-shard1-1:27017,mongodb-shard1-2:27017,mongodb-shard1-3:27017/aggregator?replicaSet=rs1"
@@ -375,6 +380,9 @@ services:
     container_name: aggregator-shard2-1
     ports:
       - "3021:3000"
+    volumes:
+      - ./data/genesis:/app/bft-config
+      - ./logs/shard2-1:/app/logs
     environment:
       <<: *environment-base
       MONGODB_URI: "mongodb://mongodb-shard2-1:27017,mongodb-shard2-2:27017,mongodb-shard2-3:27017/aggregator?replicaSet=rs2"
@@ -390,6 +398,9 @@ services:
     container_name: aggregator-shard2-2
     ports:
       - "3022:3000"
+    volumes:
+      - ./data/genesis:/app/bft-config
+      - ./logs/shard2-2:/app/logs
     environment:
       <<: *environment-base
       MONGODB_URI: "mongodb://mongodb-shard2-1:27017,mongodb-shard2-2:27017,mongodb-shard2-3:27017/aggregator?replicaSet=rs2"
@@ -410,6 +421,9 @@ services:
     container_name: aggregator-root-1
     ports:
       - "3009:3000"
+    volumes:
+      - ./data/genesis:/app/bft-config
+      - ./logs/root-1:/app/logs
     environment:
       <<: *environment-base
       MONGODB_URI: "mongodb://mongodb-root-1:27017,mongodb-root-2:27017,mongodb-root-3:27017/aggregator?replicaSet=rs-root"