Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -57,5 +57,8 @@ changes.txt
.env
.env.local

# Data directories (MongoDB, Redis, etc.)
data/
# Data directory (MongoDB, Redis, genesis files)
data/

# Logs directory
logs/
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ aggregator.log
/performance-test
bft-config
/data/
/logs/
.cache/

# Environment files with secrets
Expand Down
10 changes: 10 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -125,11 +125,21 @@ docker-restart-ha:
@echo "HA Aggregator services restarted"


# Rebuild the sharded (non-HA) stack from a clean data state while keeping the
# BFT/genesis config. Wipes only the MongoDB/Redis data dirs (genesis dirs are
# preserved), recreates the log dirs mounted by sharding-compose.yml, and runs
# the containers as the invoking user's UID/GID with debug logging.
# chmod 777 is used so container users of any UID can write the bind mounts.
docker-run-sh-clean-keep-tb:
	@echo "Rebuilding services with clean state but preserving BFT config, with sharding enabled as current user..."
	@docker compose -f sharding-compose.yml down
	@rm -rf ./data/mongodb_shard1_data ./data/mongodb_shard2_data ./data/mongodb_root_data ./data/redis_shared_data
	@mkdir -p ./data/genesis/root ./data/genesis-root ./data/mongodb_shard1_data ./data/mongodb_shard2_data ./data/mongodb_root_data ./data/redis_shared_data && chmod -R 777 ./data
	@mkdir -p ./logs/shard1 ./logs/shard2 ./logs/root && chmod -R 777 ./logs
	@USER_UID=$$(id -u) USER_GID=$$(id -g) LOG_LEVEL=debug docker compose -f sharding-compose.yml up --force-recreate -d --build
	@echo "Services rebuilt with user UID=$$(id -u):$$(id -g)"

# Same as docker-run-sh-clean-keep-tb, but for the HA topology
# (sharding-ha-compose.yml): three MongoDB replicas per shard/root and one log
# dir per aggregator replica. Genesis/BFT config directories are preserved.
docker-run-sh-ha-clean-keep-tb:
	@echo "Rebuilding services with clean state but preserving BFT config, with sharding and HA enabled as current user..."
	@docker compose -f sharding-ha-compose.yml down
	@rm -rf ./data/mongodb_shard1_data_1 ./data/mongodb_shard1_data_2 ./data/mongodb_shard1_data_3 ./data/mongodb_shard2_data_1 ./data/mongodb_shard2_data_2 ./data/mongodb_shard2_data_3 ./data/mongodb_root_data_1 ./data/mongodb_root_data_2 ./data/mongodb_root_data_3 ./data/redis_data
	@mkdir -p ./data/genesis/root ./data/genesis-root ./data/mongodb_shard1_data_1 ./data/mongodb_shard1_data_2 ./data/mongodb_shard1_data_3 ./data/mongodb_shard2_data_1 ./data/mongodb_shard2_data_2 ./data/mongodb_shard2_data_3 ./data/mongodb_root_data_1 ./data/mongodb_root_data_2 ./data/mongodb_root_data_3 ./data/redis_data && chmod -R 777 ./data
	@mkdir -p ./logs/shard1-1 ./logs/shard1-2 ./logs/shard2-1 ./logs/shard2-2 ./logs/root-1 && chmod -R 777 ./logs
	@USER_UID=$$(id -u) USER_GID=$$(id -g) LOG_LEVEL=debug docker compose -f sharding-ha-compose.yml up --force-recreate -d --build
	@echo "Services rebuilt with user UID=$$(id -u):$$(id -g)"

Expand Down
32 changes: 26 additions & 6 deletions cmd/aggregator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,12 +39,17 @@ func main() {
}

// Initialize logger
baseLogger, err := logger.New(
cfg.Logging.Level,
cfg.Logging.Format,
cfg.Logging.Output,
cfg.Logging.EnableJSON,
)
baseLogger, err := logger.NewWithConfig(logger.LogConfig{
Level: cfg.Logging.Level,
Format: cfg.Logging.Format,
Output: cfg.Logging.Output,
EnableJSON: cfg.Logging.EnableJSON,
FilePath: cfg.Logging.FilePath,
MaxSizeMB: cfg.Logging.MaxSizeMB,
MaxBackups: cfg.Logging.MaxBackups,
MaxAgeDays: cfg.Logging.MaxAgeDays,
CompressBackups: cfg.Logging.CompressBackups,
})
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
os.Exit(1)
Expand All @@ -66,6 +71,16 @@ func main() {
log = baseLogger
}

// Log file logging configuration if enabled
if cfg.Logging.FilePath != "" {
log.WithComponent("main").Info("File logging enabled",
"filePath", cfg.Logging.FilePath,
"maxSizeMB", cfg.Logging.MaxSizeMB,
"maxBackups", cfg.Logging.MaxBackups,
"maxAgeDays", cfg.Logging.MaxAgeDays,
"compress", cfg.Logging.CompressBackups)
}

log.WithComponent("main").Info("Starting Unicity Aggregator")

// create global context
Expand Down Expand Up @@ -241,6 +256,11 @@ func main() {
if asyncLogger != nil {
asyncLogger.Stop()
}

// Close file logger if enabled
if err := baseLogger.Close(); err != nil {
fmt.Printf("Failed to close file logger: %v\n", err)
}
}

type leaderSelector interface {
Expand Down
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ require (
go.opentelemetry.io/otel/metric v1.38.0
go.opentelemetry.io/otel/trace v1.38.0
golang.org/x/net v0.45.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)

require (
Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -852,6 +852,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
Expand Down
12 changes: 12 additions & 0 deletions internal/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,12 @@ type LoggingConfig struct {
EnableJSON bool `mapstructure:"enable_json"`
EnableAsync bool `mapstructure:"enable_async"`
AsyncBufferSize int `mapstructure:"async_buffer_size"`
// File logging with rotation
FilePath string `mapstructure:"file_path"` // Path to log file (empty = no file logging)
MaxSizeMB int `mapstructure:"max_size_mb"` // Max size in MB before rotation (default 100)
MaxBackups int `mapstructure:"max_backups"` // Max number of old log files to retain (default 30)
MaxAgeDays int `mapstructure:"max_age_days"` // Max days to retain old log files (default 30)
CompressBackups bool `mapstructure:"compress_backups"` // Compress rotated log files (default true)
}

// ProcessingConfig holds batch processing configuration
Expand Down Expand Up @@ -283,6 +289,12 @@ func Load() (*Config, error) {
EnableJSON: getEnvBoolOrDefault("LOG_ENABLE_JSON", true),
EnableAsync: getEnvBoolOrDefault("LOG_ENABLE_ASYNC", true),
AsyncBufferSize: getEnvIntOrDefault("LOG_ASYNC_BUFFER_SIZE", 10000),
// File logging with rotation
FilePath: getEnvOrDefault("LOG_FILE_PATH", ""),
MaxSizeMB: getEnvIntOrDefault("LOG_MAX_SIZE_MB", 100),
MaxBackups: getEnvIntOrDefault("LOG_MAX_BACKUPS", 30),
MaxAgeDays: getEnvIntOrDefault("LOG_MAX_AGE_DAYS", 30),
CompressBackups: getEnvBoolOrDefault("LOG_COMPRESS_BACKUPS", true),
},
Processing: ProcessingConfig{
BatchLimit: getEnvIntOrDefault("BATCH_LIMIT", 1000),
Expand Down
89 changes: 76 additions & 13 deletions internal/logger/logger.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ import (
"log/slog"
"os"
"strings"

"gopkg.in/natefinch/lumberjack.v2"
)

// ContextKey type for context keys
Expand All @@ -21,13 +23,37 @@ const (
// Logger wraps slog.Logger with additional functionality, such as closing the
// optional rotating file writer on shutdown via Close.
type Logger struct {
	*slog.Logger
	// fileWriter holds reference to lumberjack for cleanup.
	// NOTE(review): the constructor assigns a concrete *lumberjack.Logger here
	// even when it is nil (file logging disabled); a typed-nil pointer stored
	// in an interface compares non-nil — verify Close handles that case.
	fileWriter io.Closer // holds reference to lumberjack for cleanup
}

// LogConfig holds the configuration for creating a logger.
// Console output is always enabled; file output with rotation is enabled
// only when FilePath is non-empty.
type LogConfig struct {
	Level           string // Log level: "debug", "info", "warn"/"error" per the parser (unknown values fall back to info)
	Format          string // "json" selects the JSON handler; anything else selects the text handler unless EnableJSON is set
	Output          string // Console destination: "stderr" for stderr, anything else (including "") for stdout
	EnableJSON      bool   // Force the JSON handler regardless of Format
	FilePath        string // Path to log file (empty = no file logging)
	MaxSizeMB       int    // Max size in MB before rotation
	MaxBackups      int    // Max number of old log files to retain
	MaxAgeDays      int    // Max days to retain old log files
	CompressBackups bool   // Compress rotated log files
}

// New creates a new logger instance from the legacy positional arguments.
// It is a thin backward-compatibility wrapper over NewWithConfig; file
// logging is left disabled (empty FilePath).
func New(level, format, output string, enableJSON bool) (*Logger, error) {
	cfg := LogConfig{
		Level:      level,
		Format:     format,
		Output:     output,
		EnableJSON: enableJSON,
	}
	return NewWithConfig(cfg)
}

// NewWithConfig creates a new logger instance with full configuration including file rotation
func NewWithConfig(cfg LogConfig) (*Logger, error) {
// Parse log level
var logLevel slog.Level
switch strings.ToLower(level) {
switch strings.ToLower(cfg.Level) {
case "debug":
logLevel = slog.LevelDebug
case "info":
Expand All @@ -40,19 +66,48 @@ func New(level, format, output string, enableJSON bool) (*Logger, error) {
logLevel = slog.LevelInfo
}

// Determine output writer
var writer io.Writer
switch output {
case "stdout", "":
writer = os.Stdout
// Determine console output writer
var consoleWriter io.Writer
switch cfg.Output {
case "stderr":
writer = os.Stderr
consoleWriter = os.Stderr
default:
file, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
return nil, err
consoleWriter = os.Stdout
}

// Build the final writer (console only, or multi-writer with file)
var writer io.Writer
var fileWriter *lumberjack.Logger

if cfg.FilePath != "" {
// Set defaults if not provided
maxSize := cfg.MaxSizeMB
if maxSize <= 0 {
maxSize = 100 // 100 MB default
}
writer = file
maxBackups := cfg.MaxBackups
if maxBackups <= 0 {
maxBackups = 30
}
maxAge := cfg.MaxAgeDays
if maxAge <= 0 {
maxAge = 30
}

// Create lumberjack logger for file rotation
fileWriter = &lumberjack.Logger{
Filename: cfg.FilePath,
MaxSize: maxSize, // megabytes
MaxBackups: maxBackups, // number of backups
MaxAge: maxAge, // days
Compress: cfg.CompressBackups,
LocalTime: true, // use local time for backup timestamps
}

// Combine console and file writers
writer = io.MultiWriter(consoleWriter, fileWriter)
} else {
writer = consoleWriter
}

// Create handler options
Expand All @@ -62,14 +117,22 @@ func New(level, format, output string, enableJSON bool) (*Logger, error) {

// Create handler based on format
var handler slog.Handler
if enableJSON || format == "json" {
if cfg.EnableJSON || cfg.Format == "json" {
handler = slog.NewJSONHandler(writer, opts)
} else {
handler = slog.NewTextHandler(writer, opts)
}

logger := slog.New(handler)
return &Logger{Logger: logger}, nil
return &Logger{Logger: logger, fileWriter: fileWriter}, nil
}

// Close closes any file writers (call on shutdown for clean log rotation).
// It is safe to call when file logging is disabled and always safe to call
// exactly once; it returns the file writer's close error, if any.
func (l *Logger) Close() error {
	// NewWithConfig stores its *lumberjack.Logger variable in this io.Closer
	// field even when file logging is disabled, i.e. when the pointer is nil.
	// A typed-nil pointer inside an interface compares non-nil, so a plain
	// `l.fileWriter != nil` check would proceed to call Close on a nil
	// receiver and panic at shutdown. Guard against that explicitly.
	if lj, ok := l.fileWriter.(*lumberjack.Logger); ok && lj == nil {
		return nil
	}
	if l.fileWriter != nil {
		return l.fileWriter.Close()
	}
	return nil
}

// WithContext creates a new logger with context values
Expand Down
8 changes: 8 additions & 0 deletions sharding-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,7 @@ services:
- default
volumes:
- ./data/genesis:/app/bft-config
- ./logs/shard1:/app/logs
- ./certs:/app/certs:ro
ulimits:
nofile:
Expand Down Expand Up @@ -200,6 +201,7 @@ services:
LOG_LEVEL: "debug"
LOG_FORMAT: "json"
LOG_ENABLE_JSON: "true"
LOG_FILE_PATH: "/app/logs/aggregator.log"

# Processing Configuration
BATCH_LIMIT: "1000"
Expand Down Expand Up @@ -273,6 +275,9 @@ services:
container_name: aggregator-shard2
ports:
- "3002:3000"
volumes:
- ./data/genesis:/app/bft-config
- ./logs/shard2:/app/logs
environment:
<<: *environment-base
MONGODB_URI: "mongodb://mongodb-shard2:27017/aggregator?replicaSet=rs-shard2&directConnection=true"
Expand All @@ -295,6 +300,9 @@ services:
container_name: aggregator-root
ports:
- "3009:3000"
volumes:
- ./data/genesis:/app/bft-config
- ./logs/root:/app/logs
environment:
<<: *environment-base
MONGODB_URI: "mongodb://mongodb-root:27017/aggregator?replicaSet=rs-root&directConnection=true"
Expand Down
14 changes: 14 additions & 0 deletions sharding-ha-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -287,6 +287,7 @@ services:
- default
volumes:
- ./data/genesis:/app/bft-config
- ./logs/shard1-1:/app/logs
depends_on:
- mongo-setup-shard1
- bft-aggregator-genesis-gen
Expand All @@ -308,6 +309,7 @@ services:
LOG_LEVEL: "debug"
LOG_FORMAT: "json"
LOG_ENABLE_JSON: "true"
LOG_FILE_PATH: "/app/logs/aggregator.log"
BATCH_LIMIT: "1000"
COLLECT_PHASE_DURATION: "500ms"
BFT_ENABLED: "false"
Expand Down Expand Up @@ -361,6 +363,9 @@ services:
container_name: aggregator-shard1-2
ports:
- "3012:3000"
volumes:
- ./data/genesis:/app/bft-config
- ./logs/shard1-2:/app/logs
environment:
<<: *environment-base
MONGODB_URI: "mongodb://mongodb-shard1-1:27017,mongodb-shard1-2:27017,mongodb-shard1-3:27017/aggregator?replicaSet=rs1"
Expand All @@ -375,6 +380,9 @@ services:
container_name: aggregator-shard2-1
ports:
- "3021:3000"
volumes:
- ./data/genesis:/app/bft-config
- ./logs/shard2-1:/app/logs
environment:
<<: *environment-base
MONGODB_URI: "mongodb://mongodb-shard2-1:27017,mongodb-shard2-2:27017,mongodb-shard2-3:27017/aggregator?replicaSet=rs2"
Expand All @@ -390,6 +398,9 @@ services:
container_name: aggregator-shard2-2
ports:
- "3022:3000"
volumes:
- ./data/genesis:/app/bft-config
- ./logs/shard2-2:/app/logs
environment:
<<: *environment-base
MONGODB_URI: "mongodb://mongodb-shard2-1:27017,mongodb-shard2-2:27017,mongodb-shard2-3:27017/aggregator?replicaSet=rs2"
Expand All @@ -410,6 +421,9 @@ services:
container_name: aggregator-root-1
ports:
- "3009:3000"
volumes:
- ./data/genesis:/app/bft-config
- ./logs/root-1:/app/logs
environment:
<<: *environment-base
MONGODB_URI: "mongodb://mongodb-root-1:27017,mongodb-root-2:27017,mongodb-root-3:27017/aggregator?replicaSet=rs-root"
Expand Down