feat(loganalyzer): complete tool suite for analysing erupe logs.

This commit is contained in:
Houmgaor
2025-11-18 00:02:02 +01:00
parent 79636af608
commit 7aafc71dcc
12 changed files with 2241 additions and 0 deletions

6
tools/loganalyzer/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
# Binary
loganalyzer
loganalyzer.exe
# Test files
*.test.log

View File

@@ -0,0 +1,60 @@
# Makefile for the Erupe log analyzer tool.
#
# Every target below is a command, not a file, so all of them must be
# declared .PHONY — including `stats` and `errors`, which would otherwise
# be shadowed by same-named files in the working directory.
.PHONY: build clean install test help stats errors

# Default log file path
LOGFILE ?= ../../erupe.log

# Build the binary
build:
	@echo "Building loganalyzer..."
	@go build -o loganalyzer

# Install the binary to GOPATH/bin
install:
	@echo "Installing loganalyzer..."
	@go install

# Clean build artifacts (both the Unix and Windows binaries listed in .gitignore)
clean:
	@echo "Cleaning..."
	@rm -f loganalyzer loganalyzer.exe

# Run quick tests
test: build
	@echo "Running basic tests..."
	@./loganalyzer stats -f $(LOGFILE) > /dev/null && echo "✓ stats command works"
	@./loganalyzer errors -f $(LOGFILE) -summary > /dev/null && echo "✓ errors command works"
	@./loganalyzer connections -f $(LOGFILE) > /dev/null && echo "✓ connections command works"
	@./loganalyzer filter -f $(LOGFILE) -count > /dev/null && echo "✓ filter command works"
	@echo "All tests passed!"

# Quick stats
stats: build
	@./loganalyzer stats -f $(LOGFILE)

# Quick error summary
errors: build
	@./loganalyzer errors -f $(LOGFILE) -summary

# Show help
help:
	@echo "Erupe Log Analyzer - Makefile"
	@echo ""
	@echo "Targets:"
	@echo "  build   - Build the binary (default)"
	@echo "  install - Install to GOPATH/bin"
	@echo "  clean   - Remove build artifacts"
	@echo "  test    - Run basic functionality tests"
	@echo "  stats   - Quick stats of log file"
	@echo "  errors  - Quick error summary"
	@echo "  help    - Show this help"
	@echo ""
	@echo "Variables:"
	@echo "  LOGFILE - Path to log file (default: ../../erupe.log)"
	@echo ""
	@echo "Examples:"
	@echo "  make build"
	@echo "  make stats"
	@echo "  make test LOGFILE=/path/to/custom.log"

# Default target
.DEFAULT_GOAL := build

View File

@@ -0,0 +1,119 @@
# Quick Reference Guide
## Installation
```bash
cd tools/loganalyzer
go build -o loganalyzer
```
## Quick Commands
### View Statistics
```bash
./loganalyzer stats -f ../../erupe.log
./loganalyzer stats -f ../../erupe.log -detailed
```
### Filter Logs
```bash
# Errors only
./loganalyzer filter -f ../../erupe.log -level error
# Last hour
./loganalyzer filter -f ../../erupe.log -since 1h
# Last 50 entries
./loganalyzer filter -f ../../erupe.log -tail 50
# Search message
./loganalyzer filter -f ../../erupe.log -msg "connection reset"
```
### Analyze Errors
```bash
# Error summary
./loganalyzer errors -f ../../erupe.log -summary
# Detailed with stack traces
./loganalyzer errors -f ../../erupe.log -detailed -stack
```
### Track Connections
```bash
# Connection stats
./loganalyzer connections -f ../../erupe.log
# Player sessions
./loganalyzer connections -f ../../erupe.log -sessions
# Specific player
./loganalyzer connections -f ../../erupe.log -player "PlayerName" -sessions -v
```
### Follow Logs
```bash
# Like tail -f
./loganalyzer tail -f ../../erupe.log
# Only errors
./loganalyzer tail -f ../../erupe.log -level error
```
## Common Workflows
### Troubleshooting a crash
```bash
# 1. Check recent errors
./loganalyzer filter -f erupe.log -level error -tail 20
# 2. Analyze error patterns
./loganalyzer errors -f erupe.log -detailed -stack
# 3. Check what was happening before crash
./loganalyzer filter -f erupe.log -since "2025-11-12T23:00:00Z" -tail 100
```
### Player investigation
```bash
# 1. Find player sessions
./loganalyzer connections -f erupe.log -player "PlayerName" -sessions -v
# 2. Check log entries mentioning that player (logger names contain channel/IP, not player names)
./loganalyzer filter -f erupe.log -msg "PlayerName"
```
### Monitoring
```bash
# Real-time error monitoring
./loganalyzer tail -f erupe.log -level error
# Daily statistics
./loganalyzer stats -f erupe.log -detailed
```
## Tips
1. **Pipe to less for long output**: `./loganalyzer filter -f erupe.log | less -R`
2. **Save to file**: `./loganalyzer stats -f erupe.log > stats.txt`
3. **Combine with grep**: `./loganalyzer filter -f erupe.log -level error | grep "mail"`
4. **Use -count for quick checks**: `./loganalyzer filter -f erupe.log -level error -count`
5. **Time ranges**: `-since` accepts both absolute (RFC3339) and relative (1h, 30m) times
## Output Format
Default output is colorized:
- Errors: Red
- Warnings: Yellow
- Info: Green
Disable colors with `-color=false` for piping to files.

319
tools/loganalyzer/README.md Normal file
View File

@@ -0,0 +1,319 @@
# Erupe Log Analyzer
A comprehensive suite of Go tools to analyze Erupe server logs (`erupe.log`).
## Features
- **Filter logs** by level, logger, message content, or time range
- **Analyze errors** with grouping, statistics, and stack trace display
- **Track connections** and player sessions with detailed statistics
- **Generate statistics** about log activity, operations, and patterns
- **Tail logs** in real-time like `tail -f`
## Installation
```bash
cd tools/loganalyzer
go build -o loganalyzer
```
This will create a `loganalyzer` binary in the current directory.
## Usage
The tool provides multiple commands, each with its own options:
```bash
./loganalyzer <command> [options]
```
### Commands
#### 1. `filter` - Filter logs by various criteria
Filter logs by level, logger, message content, or time range.
**Examples:**
```bash
# Show only errors
./loganalyzer filter -f ../../erupe.log -level error
# Show warnings from the last hour
./loganalyzer filter -f ../../erupe.log -level warn -since 1h
# Filter by logger (supports wildcards)
./loganalyzer filter -f ../../erupe.log -logger "channel-4*"
# Search for specific message content
./loganalyzer filter -f ../../erupe.log -msg "connection reset"
# Show only last 50 entries
./loganalyzer filter -f ../../erupe.log -tail 50
# Count matching entries without displaying them
./loganalyzer filter -f ../../erupe.log -level error -count
```
**Options:**
- `-f` - Path to log file (default: `erupe.log`)
- `-level` - Filter by log level (info, warn, error, fatal)
- `-logger` - Filter by logger name (supports wildcards with *)
- `-msg` - Filter by message content (case-insensitive)
- `-since` - Show logs since this time (RFC3339 or duration like '1h', '30m')
- `-until` - Show logs until this time (RFC3339)
- `-color` - Colorize output (default: true)
- `-count` - Only show count of matching entries
- `-tail` - Show last N entries
#### 2. `errors` - Analyze errors and warnings
Extract and analyze errors with grouping by message, caller, or logger.
**Examples:**
```bash
# Show error summary grouped by message
./loganalyzer errors -f ../../erupe.log -summary
# Show detailed error information with examples
./loganalyzer errors -f ../../erupe.log -detailed
# Show errors with stack traces
./loganalyzer errors -f ../../erupe.log -stack -detailed
# Group errors by caller instead of message
./loganalyzer errors -f ../../erupe.log -group caller -summary
# Show more examples per error group
./loganalyzer errors -f ../../erupe.log -detailed -limit 20
```
**Options:**
- `-f` - Path to log file (default: `erupe.log`)
- `-group` - Group errors by: message, caller, or logger (default: message)
- `-stack` - Show stack traces
- `-limit` - Limit number of examples per error group (default: 10)
- `-summary` - Show summary only (grouped by error type)
- `-detailed` - Show detailed error information
#### 3. `connections` - Analyze player connections and sessions
Track connection events, player sessions, and connection statistics.
**Examples:**
```bash
# Show connection statistics
./loganalyzer connections -f ../../erupe.log
# Show individual player sessions
./loganalyzer connections -f ../../erupe.log -sessions
# Show detailed session information
./loganalyzer connections -f ../../erupe.log -sessions -v
# Filter by player name
./loganalyzer connections -f ../../erupe.log -player "Sarah" -sessions
# Show only statistics without sessions
./loganalyzer connections -f ../../erupe.log -stats -sessions=false
```
**Options:**
- `-f` - Path to log file (default: `erupe.log`)
- `-player` - Filter by player name
- `-sessions` - Show individual player sessions
- `-stats` - Show connection statistics (default: true)
- `-v` - Verbose output
**Statistics provided:**
- Total connections
- Unique players and IP addresses
- Channel distribution
- Connections per day
- Top IP addresses
- Disconnect reasons
#### 4. `stats` - Generate comprehensive statistics
Analyze overall log statistics, activity patterns, and operation counts.
**Examples:**
```bash
# Show basic statistics
./loganalyzer stats -f ../../erupe.log
# Show detailed statistics including top loggers and messages
./loganalyzer stats -f ../../erupe.log -detailed
# Show top 20 instead of default 10
./loganalyzer stats -f ../../erupe.log -detailed -top 20
```
**Options:**
- `-f` - Path to log file (default: `erupe.log`)
- `-top` - Show top N messages/loggers (default: 10)
- `-detailed` - Show detailed statistics
**Statistics provided:**
- Total log entries and time range
- Entries by log level
- Operation counts (saves, broadcasts, stage changes)
- Top loggers and messages
- Activity by day and hour
- Unique callers
#### 5. `tail` - Follow logs in real-time
Watch log file for new entries, similar to `tail -f`.
**Examples:**
```bash
# Follow log file showing last 10 lines first
./loganalyzer tail -f ../../erupe.log
# Show last 50 lines and follow
./loganalyzer tail -f ../../erupe.log -n 50
# Follow only errors
./loganalyzer tail -f ../../erupe.log -level error
# Don't follow, just show last 20 lines
./loganalyzer tail -f ../../erupe.log -n 20 -follow=false
```
**Options:**
- `-f` - Path to log file (default: `erupe.log`)
- `-n` - Number of initial lines to show (default: 10)
- `-follow` - Follow the log file (default: true)
- `-level` - Filter by log level
- `-color` - Colorize output (default: true)
## Common Use Cases
### Finding the cause of a server crash
```bash
# Look for errors around a specific time
./loganalyzer filter -f erupe.log -level error -since "2025-11-12T23:00:00Z"
# Analyze all errors with stack traces
./loganalyzer errors -f erupe.log -stack -detailed
```
### Analyzing player activity
```bash
# See which players connected today
./loganalyzer connections -f erupe.log -sessions -v
# Find all activity for a specific player
./loganalyzer connections -f erupe.log -player "Sarah" -sessions -v
```
### Monitoring server health
```bash
# Real-time monitoring of errors
./loganalyzer tail -f erupe.log -level error
# Check overall statistics
./loganalyzer stats -f erupe.log -detailed
# Analyze connection patterns
./loganalyzer connections -f erupe.log -stats
```
### Investigating specific issues
```bash
# Find all connection reset errors
./loganalyzer filter -f erupe.log -msg "connection reset"
# Analyze database errors
./loganalyzer errors -f erupe.log -group caller | grep -i database
# Check activity during peak hours
./loganalyzer stats -f erupe.log -detailed
```
## Log Format Support
The tool supports both log formats found in Erupe logs:
1. **JSON format** (structured logs):
```json
{"level":"info","ts":1762989571.547817,"logger":"main","caller":"Erupe/main.go:57","msg":"Starting Erupe"}
```
2. **Timestamp format** (simple logs):
```text
2025-11-12T23:19:31.546Z INFO commands Command Help: Enabled
```
## Performance
The tool uses streaming parsing to handle large log files efficiently:
- Memory-efficient streaming for filter and stats commands
- Fast pattern matching for message filtering
- Handles log files with millions of entries
## Output
By default, output is colorized for better readability:
- **Errors** are displayed in red
- **Warnings** are displayed in yellow
- **Info** messages are displayed in green
Colorization can be disabled with `-color=false` for piping to files or other tools.
## Tips
1. Use `-count` with filter to quickly see how many entries match without displaying them all
2. Combine `filter` with `grep` for more complex searches: `./loganalyzer filter -f erupe.log | grep pattern`
3. Use `-tail` to limit output when exploring logs interactively
4. The `-since` option accepts both absolute timestamps and relative durations (1h, 30m, 24h)
5. Use `-summary` with errors command for a quick overview before diving into details
## Building from Source
```bash
cd tools/loganalyzer
go build -o loganalyzer
```
Or to install it system-wide:
```bash
go install
```
## Contributing
Feel free to add new commands or improve existing ones. The codebase is modular:
- `parser.go` - Log parsing logic
- `filter.go` - Filter command
- `errors.go` - Error analysis command
- `connections.go` - Connection tracking command
- `stats.go` - Statistics generation
- `tail.go` - Real-time log following
- `main.go` - Command routing
## License
Part of the Erupe project.

View File

@@ -0,0 +1,459 @@
package main
import (
"flag"
"fmt"
"os"
"sort"
"strings"
"time"
)
// PlayerSession represents a single player's connection session to the server.
//
// A session is identified by the combination of channel and IP:port, tracking
// all activities from when a player connects until they disconnect.
//
// NOTE(review): FirstSeen/LastSeen are advanced only by the specific log
// messages runConnections recognizes, so LastSeen is the last *tracked*
// activity, not necessarily the true disconnect time.
type PlayerSession struct {
	Name       string    // Player name
	IPPort     string    // Client IP address and port (e.g., "192.168.1.1:12345")
	Channel    string    // Server channel (e.g., "channel-4")
	FirstSeen  time.Time // Timestamp of first activity
	LastSeen   time.Time // Timestamp of last activity
	Activities []string  // List of player activities (raw log messages)
	Stages     []string  // List of stage changes
	Objects    []string  // List of objects broadcast by this player
	Errors     int       // Number of errors encountered during session
	SaveCount  int       // Number of save operations performed
}
// ConnectionStats aggregates statistics about player connections across all sessions.
//
// This structure tracks high-level metrics useful for understanding server usage
// patterns, peak times, and common connection issues. It is populated while
// streaming the log file in runConnections and rendered by printConnectionStats.
type ConnectionStats struct {
	TotalConnections    int             // Total number of player sessions
	UniqueIPs           map[string]int  // IP addresses to connection count
	UniquePlayers       map[string]bool // Set of unique player names
	ConnectionsPerDay   map[string]int  // Date (YYYY-MM-DD) to connection count
	ChannelDistribution map[string]int  // Channel to connection count
	DisconnectReasons   map[string]int  // Disconnect reason to count
}
// runConnections implements the connections command for analyzing player connection patterns.
//
// The connections command tracks player sessions from connection to disconnection, providing
// both aggregate statistics and individual session details. It can identify patterns in
// player activity, track connection issues, and analyze channel usage.
//
// Features:
//   - Tracks individual player sessions with timestamps and activities
//   - Aggregates connection statistics (total, unique players, IPs)
//   - Shows channel distribution and peak connection times
//   - Analyzes disconnect reasons (normalized to "connection reset by peer"
//     and "timeout" buckets where they match)
//   - Supports filtering by player name
//   - Provides verbose session details including objects and stage changes
//
// Options:
//   - f: Path to log file (default: "erupe.log")
//   - player: Filter sessions by player name (case-insensitive substring match)
//   - sessions: Show individual player sessions
//   - stats: Show connection statistics (default: true)
//   - v: Verbose output including objects and stage changes
//
// Examples:
//
//	runConnections([]string{"-stats"})
//	runConnections([]string{"-sessions", "-v"})
//	runConnections([]string{"-player", "Sarah", "-sessions"})
func runConnections(args []string) {
	fs := flag.NewFlagSet("connections", flag.ExitOnError)
	logFile := fs.String("f", "erupe.log", "Path to log file")
	player := fs.String("player", "", "Filter by player name")
	showSessions := fs.Bool("sessions", false, "Show individual player sessions")
	showStats := fs.Bool("stats", true, "Show connection statistics")
	verbose := fs.Bool("v", false, "Verbose output")
	// flag.ExitOnError: Parse prints usage and exits on bad flags, so its
	// return value does not need checking here.
	fs.Parse(args)
	stats := &ConnectionStats{
		UniqueIPs:           make(map[string]int),
		UniquePlayers:       make(map[string]bool),
		ConnectionsPerDay:   make(map[string]int),
		ChannelDistribution: make(map[string]int),
		DisconnectReasons:   make(map[string]int),
	}
	sessions := make(map[string]*PlayerSession) // key: channel-IP:port
	err := StreamLogFile(*logFile, func(entry *LogEntry) error {
		// Track player activities.
		// A session is first observed via the "Sending existing stage
		// objects to <name>" message — the only tracked message that ties
		// a player name to a channel/IP logger.
		if strings.Contains(entry.Message, "Sending existing stage objects to") {
			// Extract player name (assumes the name itself contains no " to ";
			// otherwise the split yields more than two parts and the entry is skipped).
			parts := strings.Split(entry.Message, " to ")
			if len(parts) == 2 {
				playerName := strings.TrimSpace(parts[1])
				// Extract IP:port and channel from logger
				sessionKey := extractSessionKey(entry.Logger)
				if sessionKey != "" {
					session, exists := sessions[sessionKey]
					if !exists {
						// First sighting of this channel+IP:port — create the
						// session and bump the aggregate counters exactly once.
						session = &PlayerSession{
							Name:       playerName,
							IPPort:     extractIPPort(entry.Logger),
							Channel:    extractChannel(entry.Logger),
							FirstSeen:  entry.Timestamp,
							Activities: make([]string, 0),
							Stages:     make([]string, 0),
							Objects:    make([]string, 0),
						}
						sessions[sessionKey] = session
						stats.TotalConnections++
						stats.UniquePlayers[playerName] = true
						if session.IPPort != "" {
							ip := strings.Split(session.IPPort, ":")[0]
							stats.UniqueIPs[ip]++
						}
						if session.Channel != "" {
							stats.ChannelDistribution[session.Channel]++
						}
						day := entry.Timestamp.Format("2006-01-02")
						stats.ConnectionsPerDay[day]++
					}
					// Seen on every matching message, new session or not.
					session.LastSeen = entry.Timestamp
					session.Activities = append(session.Activities, entry.Message)
				}
			}
		}
		// Track broadcasts
		if strings.Contains(entry.Message, "Broadcasting new object:") {
			sessionKey := extractSessionKey(entry.Logger)
			if session, exists := sessions[sessionKey]; exists {
				parts := strings.Split(entry.Message, "Broadcasting new object: ")
				if len(parts) == 2 {
					session.Objects = append(session.Objects, parts[1])
				}
			}
		}
		// Track stage changes
		if strings.Contains(entry.Message, "Sending notification to old stage clients") {
			sessionKey := extractSessionKey(entry.Logger)
			if session, exists := sessions[sessionKey]; exists {
				session.Stages = append(session.Stages, "Stage changed")
			}
		}
		// Track save operations
		if strings.Contains(entry.Message, "Wrote recompressed savedata back to DB") {
			sessionKey := extractSessionKey(entry.Logger)
			if session, exists := sessions[sessionKey]; exists {
				session.SaveCount++
			}
		}
		// Track disconnections. The reason is counted even when no session
		// was established for this logger (e.g. the connection failed early).
		if strings.Contains(entry.Message, "Error on ReadPacket, exiting recv loop") ||
			strings.Contains(entry.Message, "Error reading packet") {
			sessionKey := extractSessionKey(entry.Logger)
			if session, exists := sessions[sessionKey]; exists {
				session.Errors++
			}
			// Extract disconnect reason, collapsing common variants into
			// stable buckets so the stats table stays readable.
			if entry.Error != "" {
				reason := entry.Error
				if strings.Contains(reason, "connection reset by peer") {
					reason = "connection reset by peer"
				} else if strings.Contains(reason, "timeout") {
					reason = "timeout"
				}
				stats.DisconnectReasons[reason]++
			}
		}
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error processing log file: %v\n", err)
		os.Exit(1)
	}
	// Filter by player if specified (case-insensitive substring match).
	if *player != "" {
		filteredSessions := make(map[string]*PlayerSession)
		for key, session := range sessions {
			if strings.Contains(strings.ToLower(session.Name), strings.ToLower(*player)) {
				filteredSessions[key] = session
			}
		}
		sessions = filteredSessions
	}
	// Display results. Note: the -player filter applies only to the session
	// listing; aggregate stats always cover the whole file.
	if *showStats {
		printConnectionStats(stats)
	}
	if *showSessions {
		printPlayerSessions(sessions, *verbose)
	}
}
// printConnectionStats renders the aggregated connection report to stdout.
//
// Sections (each printed only when it has data):
//   - headline totals (connections, unique players, unique IPs)
//   - channel distribution, sorted alphabetically by channel name
//   - connections per day, sorted chronologically
//   - disconnect reasons, sorted by frequency (most common first)
//   - top 10 IP addresses by connection count
//
// Parameters:
//   - stats: aggregated connection data collected by runConnections
func printConnectionStats(stats *ConnectionStats) {
	type kv struct {
		key   string
		count int
	}
	// toPairs snapshots a map into a slice so it can be sorted deterministically.
	toPairs := func(m map[string]int) []kv {
		pairs := make([]kv, 0, len(m))
		for k, c := range m {
			pairs = append(pairs, kv{k, c})
		}
		return pairs
	}
	byKey := func(pairs []kv) {
		sort.Slice(pairs, func(i, j int) bool { return pairs[i].key < pairs[j].key })
	}
	byCount := func(pairs []kv) {
		sort.Slice(pairs, func(i, j int) bool { return pairs[i].count > pairs[j].count })
	}

	fmt.Printf("=== Connection Statistics ===\n\n")
	fmt.Printf("Total Connections: %d\n", stats.TotalConnections)
	fmt.Printf("Unique Players: %d\n", len(stats.UniquePlayers))
	fmt.Printf("Unique IP Addresses: %d\n", len(stats.UniqueIPs))

	if len(stats.ChannelDistribution) > 0 {
		fmt.Printf("\n--- Channel Distribution ---\n")
		pairs := toPairs(stats.ChannelDistribution)
		byKey(pairs)
		for _, p := range pairs {
			fmt.Printf(" %s: %d connections\n", p.key, p.count)
		}
	}

	if len(stats.ConnectionsPerDay) > 0 {
		fmt.Printf("\n--- Connections Per Day ---\n")
		// Dates are "YYYY-MM-DD", so lexicographic order is chronological.
		pairs := toPairs(stats.ConnectionsPerDay)
		byKey(pairs)
		for _, p := range pairs {
			fmt.Printf(" %s: %d connections\n", p.key, p.count)
		}
	}

	if len(stats.DisconnectReasons) > 0 {
		fmt.Printf("\n--- Disconnect Reasons ---\n")
		pairs := toPairs(stats.DisconnectReasons)
		byCount(pairs)
		for _, p := range pairs {
			fmt.Printf(" %s: %d times\n", p.key, p.count)
		}
	}

	if len(stats.UniqueIPs) > 0 {
		fmt.Printf("\n--- Top IP Addresses ---\n")
		pairs := toPairs(stats.UniqueIPs)
		byCount(pairs)
		// Show top 10 at most.
		limit := 10
		if len(pairs) < limit {
			limit = len(pairs)
		}
		for _, p := range pairs[:limit] {
			fmt.Printf(" %s: %d connections\n", p.key, p.count)
		}
	}
}
// printPlayerSessions prints one formatted report block per session,
// ordered chronologically by each session's FirstSeen timestamp.
//
// Each block shows the player name, channel, IP:port, first/last seen
// timestamps, duration, save-operation count, and error count. With
// verbose=true, broadcast objects and the number of stage changes are
// appended when present.
//
// Parameters:
//   - sessions: map of session keys to PlayerSession data
//   - verbose: whether to include object and stage-change details
func printPlayerSessions(sessions map[string]*PlayerSession, verbose bool) {
	// Order sessions chronologically; the map keys themselves are not shown.
	ordered := make([]*PlayerSession, 0, len(sessions))
	for _, s := range sessions {
		ordered = append(ordered, s)
	}
	sort.Slice(ordered, func(i, j int) bool {
		return ordered[i].FirstSeen.Before(ordered[j].FirstSeen)
	})

	divider := strings.Repeat("-", 80)
	fmt.Printf("\n\n=== Player Sessions ===\n")
	fmt.Printf("Total Sessions: %d\n\n", len(sessions))
	for i, s := range ordered {
		fmt.Printf("%s\n", divider)
		fmt.Printf("Session #%d: %s\n", i+1, s.Name)
		fmt.Printf("%s\n", divider)
		fmt.Printf("Channel: %s\n", s.Channel)
		fmt.Printf("IP:Port: %s\n", s.IPPort)
		fmt.Printf("First Seen: %s\n", s.FirstSeen.Format("2006-01-02 15:04:05"))
		fmt.Printf("Last Seen: %s\n", s.LastSeen.Format("2006-01-02 15:04:05"))
		fmt.Printf("Duration: %s\n", formatDuration(s.LastSeen.Sub(s.FirstSeen)))
		fmt.Printf("Save Operations: %d\n", s.SaveCount)
		fmt.Printf("Errors: %d\n", s.Errors)
		if verbose {
			if len(s.Objects) > 0 {
				fmt.Printf("\nObjects: %s\n", strings.Join(s.Objects, ", "))
			}
			if len(s.Stages) > 0 {
				fmt.Printf("Stage Changes: %d\n", len(s.Stages))
			}
		}
		fmt.Println()
	}
}
// extractSessionKey derives a unique session identifier from a logger name.
//
// Expected logger shape: "main.channel-X.IP:port". The key is everything
// after the first dot-separated segment, e.g. "channel-X.IP:port", which
// uniquely identifies a player session by channel plus client address.
//
// Parameters:
//   - logger: the logger field from a log entry
//
// Returns:
//   - the session key, or "" when the logger has fewer than three
//     dot-separated segments
func extractSessionKey(logger string) string {
	// Require at least three segments ("main", channel, address).
	if strings.Count(logger, ".") < 2 {
		return ""
	}
	_, key, _ := strings.Cut(logger, ".")
	return key
}
// extractIPPort extracts the client IP address and port from a logger string.
//
// Logger format: "main.channel-X.A.B.C.D:port" where A.B.C.D is the client's
// IPv4 address. Because the dots inside the address are the same separator
// used between logger segments, the address spans the last four dot-separated
// parts, with the final part carrying the last octet plus ":port".
//
// Parameters:
//   - logger: the logger field from a log entry
//
// Returns:
//   - "A.B.C.D:port", or "" when the logger does not have at least four
//     dot-separated segments ending in a ":port" suffix
func extractIPPort(logger string) string {
	parts := strings.Split(logger, ".")
	if len(parts) < 4 {
		return ""
	}
	last := parts[len(parts)-1] // e.g. "D:port"
	if !strings.Contains(last, ":") {
		return ""
	}
	// Rejoin the first three octets, then append the final "octet:port"
	// segment with a dot. The previous implementation joined with ":",
	// producing malformed values like "192.168.1:1:12345" and truncating
	// the IP that runConnections derives for its UniqueIPs stats.
	return strings.Join(parts[len(parts)-4:len(parts)-1], ".") + "." + last
}
// extractChannel extracts the channel name from a logger string.
//
// Logger format: "main.channel-X.IP:port"; the result is the "channel-X"
// segment, i.e. the text from the first "channel-" marker up to the next dot.
//
// Parameters:
//   - logger: the logger field from a log entry
//
// Returns:
//   - the channel name (e.g., "channel-4"), or "" if no "channel-" marker is present
func extractChannel(logger string) string {
	idx := strings.Index(logger, "channel-")
	if idx < 0 {
		return ""
	}
	// Keep everything after the marker up to (not including) the next dot.
	suffix := logger[idx+len("channel-"):]
	if dot := strings.IndexByte(suffix, '.'); dot >= 0 {
		suffix = suffix[:dot]
	}
	return "channel-" + suffix
}
// formatDuration formats a time duration into a human-readable string.
//
// The format varies based on duration:
// - Less than 1 minute: "N seconds"
// - Less than 1 hour: "N.N minutes"
// - 1 hour or more: "N.N hours"
//
// Parameters:
// - d: The duration to format
//
// Returns:
// - A human-readable string representation of the duration
func formatDuration(d time.Duration) string {
if d < time.Minute {
return fmt.Sprintf("%.0f seconds", d.Seconds())
} else if d < time.Hour {
return fmt.Sprintf("%.1f minutes", d.Minutes())
} else {
return fmt.Sprintf("%.1f hours", d.Hours())
}
}

269
tools/loganalyzer/errors.go Normal file
View File

@@ -0,0 +1,269 @@
package main
import (
"flag"
"fmt"
"os"
"sort"
"strings"
)
// ErrorGroup represents a collection of similar errors grouped together.
//
// Errors can be grouped by message, caller, or logger to identify patterns
// and recurring issues in the logs. FirstSeen/LastSeen are stored already
// formatted as "2006-01-02 15:04:05" strings rather than time.Time values.
type ErrorGroup struct {
	Message   string         // Primary message for this error group (taken from the first entry seen)
	Count     int            // Total number of occurrences
	FirstSeen string         // Timestamp of first occurrence
	LastSeen  string         // Timestamp of last occurrence
	Examples  []*LogEntry    // Sample log entries (capped by the -limit flag)
	Callers   map[string]int // Map of caller locations to occurrence counts
}
// runErrors implements the errors command for extracting and analyzing errors.
//
// The errors command processes log files to find all errors and warnings, groups them
// by a specified criterion (message, caller, or logger), and presents statistics and
// examples for each group.
//
// Features:
//   - Groups errors by message (default), caller, or logger
//   - Shows total error and warning counts
//   - Displays first and last occurrence timestamps
//   - Optionally shows stack traces for detailed debugging
//   - Provides summary or detailed views
//   - Tracks which callers produced each error
//
// Options:
//   - f: Path to log file (default: "erupe.log")
//   - group: Group errors by "message", "caller", or "logger" (default: "message")
//   - stack: Show stack traces in detailed view
//   - limit: Maximum number of example entries per group (default: 10)
//   - summary: Show summary table only
//   - detailed: Show detailed information including examples and extra data
//
// Examples:
//
//	runErrors([]string{"-summary"})
//	runErrors([]string{"-detailed", "-stack"})
//	runErrors([]string{"-group", "caller", "-limit", "20"})
func runErrors(args []string) {
	fs := flag.NewFlagSet("errors", flag.ExitOnError)
	logFile := fs.String("f", "erupe.log", "Path to log file")
	groupBy := fs.String("group", "message", "Group errors by: message, caller, or logger")
	showStack := fs.Bool("stack", false, "Show stack traces")
	limit := fs.Int("limit", 10, "Limit number of examples per error group")
	summary := fs.Bool("summary", false, "Show summary only (grouped by error type)")
	detailed := fs.Bool("detailed", false, "Show detailed error information")
	// flag.ExitOnError: Parse exits on bad flags, so no error check is needed.
	fs.Parse(args)
	errorGroups := make(map[string]*ErrorGroup)
	var totalErrors int
	var totalWarnings int
	err := StreamLogFile(*logFile, func(entry *LogEntry) error {
		// Only process errors and warnings; everything else is skipped.
		if entry.Level != "error" && entry.Level != "warn" {
			return nil
		}
		if entry.Level == "error" {
			totalErrors++
		} else {
			totalWarnings++
		}
		// Determine grouping key from the -group flag.
		var key string
		switch *groupBy {
		case "message":
			key = entry.Message
		case "caller":
			key = entry.Caller
		case "logger":
			key = entry.Logger
		default:
			// Unknown -group values silently fall back to message grouping.
			key = entry.Message
		}
		// Create or update error group. Message is taken from the first
		// entry in the group; LastSeen is refreshed on every hit.
		group, exists := errorGroups[key]
		if !exists {
			group = &ErrorGroup{
				Message:   entry.Message,
				Callers:   make(map[string]int),
				Examples:  make([]*LogEntry, 0),
				FirstSeen: entry.Timestamp.Format("2006-01-02 15:04:05"),
			}
			errorGroups[key] = group
		}
		group.Count++
		group.LastSeen = entry.Timestamp.Format("2006-01-02 15:04:05")
		if entry.Caller != "" {
			group.Callers[entry.Caller]++
		}
		// Store example (capped at -limit per group to bound memory use).
		if len(group.Examples) < *limit {
			group.Examples = append(group.Examples, entry)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error processing log file: %v\n", err)
		os.Exit(1)
	}
	// Print results: headline totals, then either the summary table or the
	// per-group detail view.
	fmt.Printf("=== Error Analysis ===\n")
	fmt.Printf("Total Errors: %d\n", totalErrors)
	fmt.Printf("Total Warnings: %d\n", totalWarnings)
	fmt.Printf("Unique Error Groups: %d\n\n", len(errorGroups))
	if *summary {
		printErrorSummary(errorGroups)
	} else {
		printDetailedErrors(errorGroups, *showStack, *detailed)
	}
}
// printErrorSummary prints a one-line-per-group table of error groups,
// most frequent first.
//
// Columns: error message (truncated to 60 chars with "..." when longer),
// occurrence count, first-seen timestamp, last-seen timestamp.
//
// Parameters:
//   - groups: map of error groups to summarize
func printErrorSummary(groups map[string]*ErrorGroup) {
	// Order groups by descending count; map keys are not displayed.
	ordered := make([]*ErrorGroup, 0, len(groups))
	for _, g := range groups {
		ordered = append(ordered, g)
	}
	sort.Slice(ordered, func(i, j int) bool {
		return ordered[i].Count > ordered[j].Count
	})

	fmt.Printf("%-60s | %-8s | %-19s | %-19s\n", "Error Message", "Count", "First Seen", "Last Seen")
	fmt.Println(strings.Repeat("-", 120))
	for _, g := range ordered {
		msg := g.Message
		if len(msg) > 60 {
			// Truncate long messages so the column stays 60 wide.
			msg = msg[:57] + "..."
		}
		fmt.Printf("%-60s | %-8d | %-19s | %-19s\n", msg, g.Count, g.FirstSeen, g.LastSeen)
	}
}
// printDetailedErrors prints a full report section for every error group,
// most frequent group first.
//
// Each section shows the group's count, message, and first/last seen
// timestamps, followed by caller locations sorted by frequency. With
// detailed=true, stored example entries are printed including logger,
// caller, error text, and extra data; with showStack=true, stack traces
// are included when an example has one.
//
// Parameters:
//   - groups: map of error groups to display
//   - showStack: whether to include stack traces in the output
//   - detailed: whether to show example occurrences and extra data
func printDetailedErrors(groups map[string]*ErrorGroup, showStack, detailed bool) {
	// Order groups by descending count; map keys are not displayed.
	ordered := make([]*ErrorGroup, 0, len(groups))
	for _, g := range groups {
		ordered = append(ordered, g)
	}
	sort.Slice(ordered, func(i, j int) bool {
		return ordered[i].Count > ordered[j].Count
	})

	banner := strings.Repeat("=", 80)
	for idx, group := range ordered {
		fmt.Printf("\n%s\n", banner)
		fmt.Printf("Error Group #%d (Count: %d)\n", idx+1, group.Count)
		fmt.Printf("%s\n", banner)
		fmt.Printf("Message: %s\n", group.Message)
		fmt.Printf("First Seen: %s\n", group.FirstSeen)
		fmt.Printf("Last Seen: %s\n", group.LastSeen)

		if len(group.Callers) > 0 {
			fmt.Printf("\nCallers:\n")
			// Sort caller locations by descending hit count.
			type callerCount struct {
				name string
				hits int
			}
			callers := make([]callerCount, 0, len(group.Callers))
			for name, hits := range group.Callers {
				callers = append(callers, callerCount{name, hits})
			}
			sort.Slice(callers, func(i, j int) bool {
				return callers[i].hits > callers[j].hits
			})
			for _, c := range callers {
				fmt.Printf(" %s: %d times\n", c.name, c.hits)
			}
		}

		if detailed && len(group.Examples) > 0 {
			fmt.Printf("\nExample occurrences:\n")
			for i, ex := range group.Examples {
				fmt.Printf("\n [Example %d] %s\n", i+1, ex.Timestamp.Format("2006-01-02 15:04:05.000"))
				fmt.Printf(" Logger: %s\n", ex.Logger)
				if ex.Caller != "" {
					fmt.Printf(" Caller: %s\n", ex.Caller)
				}
				if ex.Error != "" {
					fmt.Printf(" Error: %s\n", ex.Error)
				}
				// Extra structured fields attached to the entry, if any.
				if len(ex.ExtraData) > 0 {
					fmt.Printf(" Extra Data:\n")
					for k, v := range ex.ExtraData {
						fmt.Printf(" %s: %v\n", k, v)
					}
				}
				if showStack && ex.StackTrace != "" {
					fmt.Printf(" Stack Trace:\n")
					for _, line := range strings.Split(ex.StackTrace, "\n") {
						fmt.Printf(" %s\n", line)
					}
				}
			}
		}
	}
}

182
tools/loganalyzer/filter.go Normal file
View File

@@ -0,0 +1,182 @@
package main
import (
"flag"
"fmt"
"os"
"strings"
"time"
)
// runFilter implements the filter command for filtering log entries by various criteria.
//
// The filter command supports the following filters:
//   - level: Filter by log level (info, warn, error, fatal)
//   - logger: Filter by logger name (supports wildcards with *)
//   - msg: Filter by message content (case-insensitive substring match)
//   - since: Show logs since this time (RFC3339 format or duration like "1h", "30m")
//   - until: Show logs until this time (RFC3339 format)
//   - tail: Show only the last N matching entries
//   - count: Show only the count of matching entries instead of the entries themselves
//   - color: Enable/disable colorized output (default: true)
//
// All filters are combined with AND logic.
//
// Examples:
//
//	runFilter([]string{"-level", "error"})
//	runFilter([]string{"-since", "1h", "-logger", "channel-4*"})
//	runFilter([]string{"-msg", "connection reset", "-count"})
func runFilter(args []string) {
	fs := flag.NewFlagSet("filter", flag.ExitOnError)
	logFile := fs.String("f", "erupe.log", "Path to log file")
	level := fs.String("level", "", "Filter by log level (info, warn, error, fatal)")
	logger := fs.String("logger", "", "Filter by logger name (supports wildcards)")
	message := fs.String("msg", "", "Filter by message content (case-insensitive)")
	sinceStr := fs.String("since", "", "Show logs since this time (RFC3339 or duration like '1h')")
	untilStr := fs.String("until", "", "Show logs until this time (RFC3339)")
	colorize := fs.Bool("color", true, "Colorize output")
	count := fs.Bool("count", false, "Only show count of matching entries")
	tail := fs.Int("tail", 0, "Show last N entries")
	fs.Parse(args)
	// Parse time filters up front so bad inputs are reported before any file I/O.
	// (The previous version shadowed `err` inside the if-initializer, which made
	// the since/until error handling fragile; helpers avoid that entirely.)
	var since, until time.Time
	if *sinceStr != "" {
		t, err := parseSinceTime(*sinceStr)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Invalid since time format: %s\n", *sinceStr)
			os.Exit(1)
		}
		since = t
	}
	if *untilStr != "" {
		t, err := time.Parse(time.RFC3339, *untilStr)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Invalid until time format: %s\n", *untilStr)
			os.Exit(1)
		}
		until = t
	}
	// Lowercase the message filter once instead of on every entry.
	msgFilter := strings.ToLower(*message)
	// Collect matching entries.
	var matches []*LogEntry
	var totalCount int
	err := StreamLogFile(*logFile, func(entry *LogEntry) error {
		totalCount++
		// Apply filters (AND semantics: first mismatch skips the entry).
		if *level != "" && !strings.EqualFold(entry.Level, *level) {
			return nil
		}
		if *logger != "" && !matchWildcard(entry.Logger, *logger) {
			return nil
		}
		if msgFilter != "" && !strings.Contains(strings.ToLower(entry.Message), msgFilter) {
			return nil
		}
		if !since.IsZero() && entry.Timestamp.Before(since) {
			return nil
		}
		if !until.IsZero() && entry.Timestamp.After(until) {
			return nil
		}
		matches = append(matches, entry)
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error processing log file: %v\n", err)
		os.Exit(1)
	}
	// Handle tail option: keep only the last N matches.
	if *tail > 0 && len(matches) > *tail {
		matches = matches[len(matches)-*tail:]
	}
	if *count {
		fmt.Printf("Total entries: %d\n", totalCount)
		fmt.Printf("Matching entries: %d\n", len(matches))
	} else {
		for _, entry := range matches {
			fmt.Println(FormatLogEntry(entry, *colorize))
		}
		if len(matches) > 0 {
			fmt.Fprintf(os.Stderr, "\n%d of %d entries matched\n", len(matches), totalCount)
		}
	}
}

// parseSinceTime interprets s as either a relative duration ("1h", "30m"),
// yielding time.Now() minus that duration, or an absolute RFC3339 timestamp.
//
// Returns the resolved time, or the RFC3339 parse error when neither form applies.
func parseSinceTime(s string) (time.Time, error) {
	if d, err := time.ParseDuration(s); err == nil {
		return time.Now().Add(-d), nil
	}
	return time.Parse(time.RFC3339, s)
}
// matchWildcard performs simple wildcard matching where * matches any sequence of characters.
//
// Supported patterns:
//   - "*" matches everything
//   - "foo*" matches strings starting with "foo"
//   - "*foo" matches strings ending with "foo"
//   - "*foo*" matches strings containing "foo"
//   - "foo*bar" matches strings starting with "foo" and ending with "bar"
//
// Matching is case-insensitive. A pattern with no wildcards degenerates to a
// case-insensitive substring test.
//
// Parameters:
//   - s: The string to match against
//   - pattern: The pattern with optional wildcards
//
// Returns:
//   - true if the string matches the pattern, false otherwise
//
// Examples:
//
//	matchWildcard("channel-4", "channel-*")              // returns true
//	matchWildcard("main.channel-4.error", "*channel-4*") // returns true
//	matchWildcard("test", "foo*")                        // returns false
func matchWildcard(s, pattern string) bool {
	if pattern == "*" {
		return true
	}
	lower := strings.ToLower(s)
	if !strings.Contains(pattern, "*") {
		// No wildcards: plain case-insensitive substring match.
		return strings.Contains(lower, strings.ToLower(pattern))
	}
	segments := strings.Split(strings.ToLower(pattern), "*")
	// Consume the string left to right, locating each literal segment in order.
	remaining := lower
	for i, seg := range segments {
		if seg == "" {
			continue
		}
		at := strings.Index(remaining, seg)
		if at < 0 {
			return false
		}
		// A leading literal (pattern does not start with "*") is anchored
		// to the beginning of the string.
		if i == 0 && at != 0 {
			return false
		}
		remaining = remaining[at+len(seg):]
	}
	// A trailing literal (pattern does not end with "*") is anchored to the end.
	if strings.HasSuffix(pattern, "*") {
		return true
	}
	return strings.HasSuffix(lower, segments[len(segments)-1])
}

5
tools/loganalyzer/go.mod Normal file
View File

@@ -0,0 +1,5 @@
module erupe-loganalyzer
go 1.25
require ()

61
tools/loganalyzer/main.go Normal file
View File

@@ -0,0 +1,61 @@
package main
import (
"flag"
"fmt"
"os"
)
// main is the entry point for the log analyzer CLI tool.
//
// The tool provides five main commands:
//   - filter: Filter logs by level, logger, message content, or time range
//   - errors: Extract and analyze errors with grouping and stack traces
//   - connections: Track player connections and sessions with statistics
//   - stats: Generate comprehensive statistics about log activity
//   - tail: Follow logs in real-time (like tail -f)
//
// Usage:
//
//	loganalyzer <command> [options]
//	loganalyzer filter -level error -since 1h
//	loganalyzer errors -summary
//	loganalyzer stats -detailed
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Erupe Log Analyzer - Suite of tools to analyze erupe.log files\n\n")
		fmt.Fprintf(os.Stderr, "Usage:\n")
		fmt.Fprintf(os.Stderr, " %s <command> [options]\n\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "Available commands:\n")
		fmt.Fprintf(os.Stderr, " filter Filter logs by level, logger, or time range\n")
		fmt.Fprintf(os.Stderr, " errors Extract and analyze errors with stack traces\n")
		fmt.Fprintf(os.Stderr, " connections Analyze connection events and player sessions\n")
		fmt.Fprintf(os.Stderr, " stats Generate statistics summary\n")
		fmt.Fprintf(os.Stderr, " tail Follow log file in real-time (like tail -f)\n")
		fmt.Fprintf(os.Stderr, "\nUse '%s <command> -h' for more information about a command.\n", os.Args[0])
	}
	if len(os.Args) < 2 {
		flag.Usage()
		os.Exit(1)
	}
	// Dispatch table: command name -> handler taking the remaining args.
	commands := map[string]func([]string){
		"filter":      runFilter,
		"errors":      runErrors,
		"connections": runConnections,
		"stats":       runStats,
		"tail":        runTail,
	}
	name := os.Args[1]
	run, ok := commands[name]
	if !ok {
		fmt.Fprintf(os.Stderr, "Unknown command: %s\n\n", name)
		flag.Usage()
		os.Exit(1)
	}
	run(os.Args[2:])
}

315
tools/loganalyzer/parser.go Normal file
View File

@@ -0,0 +1,315 @@
// Package main provides a comprehensive suite of tools for analyzing Erupe server logs.
//
// The log analyzer supports both JSON-formatted logs and tab-delimited timestamp logs,
// providing commands for filtering, error analysis, connection tracking, statistics
// generation, and real-time log following.
package main
import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"sort"
	"strings"
	"time"
)
// LogEntry represents a parsed log entry from either JSON or timestamp-based format.
//
// The parser supports two log formats:
//  1. JSON format: {"level":"info","ts":1762989571.547817,"logger":"main","msg":"Starting"}
//  2. Timestamp format: 2025-11-12T23:19:31.546Z INFO commands Command Help: Enabled
//
// Fields not present in the source line are left at their zero value.
type LogEntry struct {
	Raw        string                 // Original log line, exactly as read from the file
	Level      string                 // Log level: info, warn, error, fatal ("unknown" when unparseable)
	Timestamp  time.Time              // Parsed timestamp (zero value if the line had none)
	Logger     string                 // Logger name
	Caller     string                 // Caller file:line
	Message    string                 // Log message
	Error      string                 // Error message (if present)
	StackTrace string                 // Stack trace (if present)
	ExtraData  map[string]interface{} // Additional fields beyond the standard ones
	IsJSON     bool                   // True if parsed from JSON format
}
// ParseLogFile reads and parses an entire log file into memory.
//
// This function loads all log entries into memory and is suitable for smaller
// log files or when random access to entries is needed. For large files or
// streaming operations, use StreamLogFile instead.
//
// Both JSON and timestamp-based log formats are handled automatically. Empty
// lines and "nohup: ignoring input" messages are skipped, and a large (1MB)
// scanner buffer is used so long lines such as stack traces do not overflow.
//
// Parameters:
//   - filename: Path to the log file to parse
//
// Returns:
//   - A slice of LogEntry pointers containing all parsed entries
//   - An error if the file cannot be opened or read
//
// Example:
//
//	entries, err := ParseLogFile("erupe.log")
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Printf("Parsed %d entries\n", len(entries))
func ParseLogFile(filename string) ([]*LogEntry, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, fmt.Errorf("failed to open log file: %w", err)
	}
	defer f.Close()
	const bufSize = 1024 * 1024 // 1MB: stack traces can produce very long lines
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, bufSize), bufSize)
	var parsed []*LogEntry
	for sc.Scan() {
		text := sc.Text()
		if text == "" || text == "nohup: ignoring input" {
			continue
		}
		if e := ParseLogLine(text); e != nil {
			parsed = append(parsed, e)
		}
	}
	if err := sc.Err(); err != nil {
		return nil, fmt.Errorf("error reading log file: %w", err)
	}
	return parsed, nil
}
// ParseLogLine parses a single log line into a LogEntry.
//
// Parsing is attempted in the following order:
//  1. JSON format: lines starting with '{' are decoded as JSON objects
//  2. Timestamp format: tab-delimited lines with an RFC3339 timestamp
//  3. Unknown format: anything else is kept verbatim with level "unknown"
//
// For JSON logs, all standard fields (level, ts, logger, caller, msg, error,
// stacktrace) are extracted, and any additional fields are stored in ExtraData.
//
// Parameters:
//   - line: A single line from the log file
//
// Returns:
//   - A LogEntry pointer containing the parsed data (never nil)
//
// Example:
//
//	entry := ParseLogLine(`{"level":"info","ts":1762989571.547817,"msg":"Starting"}`)
//	fmt.Println(entry.Level, entry.Message)
func ParseLogLine(line string) *LogEntry {
	entry := &LogEntry{
		Raw:       line,
		ExtraData: make(map[string]interface{}),
	}
	// Try parsing as JSON first.
	if strings.HasPrefix(line, "{") {
		var jsonData map[string]interface{}
		if err := json.Unmarshal([]byte(line), &jsonData); err == nil {
			entry.IsJSON = true
			// Single pass over the object: known fields go to their struct
			// slots, everything else lands in ExtraData. This replaces the
			// previous extract-then-exclude if-chain with one switch.
			for k, v := range jsonData {
				switch k {
				case "level":
					if level, ok := v.(string); ok {
						entry.Level = level
					}
				case "ts":
					// zap encodes the timestamp as fractional Unix seconds.
					if ts, ok := v.(float64); ok {
						sec := int64(ts)
						entry.Timestamp = time.Unix(sec, int64((ts-float64(sec))*1e9))
					}
				case "logger":
					if logger, ok := v.(string); ok {
						entry.Logger = logger
					}
				case "caller":
					if caller, ok := v.(string); ok {
						entry.Caller = caller
					}
				case "msg":
					if msg, ok := v.(string); ok {
						entry.Message = msg
					}
				case "error":
					if errMsg, ok := v.(string); ok {
						entry.Error = errMsg
					}
				case "stacktrace":
					if stackTrace, ok := v.(string); ok {
						entry.StackTrace = stackTrace
					}
				default:
					entry.ExtraData[k] = v
				}
			}
			return entry
		}
	}
	// Try parsing as a tab-delimited timestamp-based log line
	// (2025-11-12T23:19:31.546Z <TAB> INFO <TAB> commands <TAB> message).
	parts := strings.SplitN(line, "\t", 4)
	if len(parts) >= 3 {
		if ts, err := time.Parse(time.RFC3339Nano, parts[0]); err == nil {
			entry.Timestamp = ts
			entry.Level = strings.ToLower(parts[1])
			entry.Logger = parts[2]
			if len(parts) == 4 {
				entry.Message = parts[3]
			}
			return entry
		}
	}
	// Unrecognized format: keep the raw text as the message.
	entry.Level = "unknown"
	entry.Message = line
	return entry
}
// StreamLogFile reads a log file line by line and calls the callback for each entry.
//
// This function is memory-efficient and suitable for processing large log
// files: entries are handed to the callback one at a time instead of being
// accumulated in memory. A 1MB scanner buffer accommodates long lines such
// as stack traces, and empty lines / "nohup: ignoring input" are skipped.
//
// If the callback returns an error, processing stops immediately and that
// error is returned.
//
// Parameters:
//   - filename: Path to the log file to process
//   - callback: Function to call for each parsed LogEntry
//
// Returns:
//   - An error if the file cannot be opened, read, or if the callback returns an error
//
// Example:
//
//	err := StreamLogFile("erupe.log", func(entry *LogEntry) error {
//	    if entry.Level == "error" {
//	        fmt.Println(entry.Message)
//	    }
//	    return nil
//	})
func StreamLogFile(filename string, callback func(*LogEntry) error) error {
	f, err := os.Open(filename)
	if err != nil {
		return fmt.Errorf("failed to open log file: %w", err)
	}
	defer f.Close()
	const bufSize = 1024 * 1024 // 1MB: room for stack-trace lines
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, bufSize), bufSize)
	for sc.Scan() {
		text := sc.Text()
		if text == "" || text == "nohup: ignoring input" {
			continue
		}
		entry := ParseLogLine(text)
		if entry == nil {
			continue
		}
		if err := callback(entry); err != nil {
			return err
		}
	}
	return sc.Err()
}
// FormatLogEntry formats a log entry for human-readable display.
//
// The output format is: "TIMESTAMP LEVEL [LOGGER] MESSAGE key=value ..."
//
// Timestamps are formatted as "2006-01-02 15:04:05.000". Log levels can be
// colorized for terminal display:
//   - Errors (error, fatal, panic): Red
//   - Warnings: Yellow
//   - Info: Green
//
// If the entry contains an error message, it's appended as error="message".
// Extra fields in ExtraData are appended as key=value pairs in sorted key
// order, so repeated runs produce identical, diff-able output (Go randomizes
// map iteration order).
//
// Parameters:
//   - entry: The LogEntry to format
//   - colorize: Whether to add ANSI color codes for terminal display
//
// Returns:
//   - A formatted string representation of the log entry
//
// Example:
//
//	formatted := FormatLogEntry(entry, true)
//	fmt.Println(formatted)
//	// Output: 2025-11-12 23:19:31.546 INFO [main] Starting Erupe
func FormatLogEntry(entry *LogEntry, colorize bool) string {
	var sb strings.Builder
	// Format timestamp.
	if !entry.Timestamp.IsZero() {
		sb.WriteString(entry.Timestamp.Format("2006-01-02 15:04:05.000"))
		sb.WriteString(" ")
	}
	// Pad the level BEFORE wrapping it in ANSI codes: the escape sequences
	// count toward %-5s width but are invisible, which previously broke
	// column alignment for colorized output.
	levelStr := fmt.Sprintf("%-5s", strings.ToUpper(entry.Level))
	if colorize {
		switch entry.Level {
		case "error", "fatal", "panic":
			levelStr = fmt.Sprintf("\033[31m%s\033[0m", levelStr) // Red
		case "warn":
			levelStr = fmt.Sprintf("\033[33m%s\033[0m", levelStr) // Yellow
		case "info":
			levelStr = fmt.Sprintf("\033[32m%s\033[0m", levelStr) // Green
		}
	}
	sb.WriteString(levelStr)
	sb.WriteString(" ")
	// Format logger.
	if entry.Logger != "" {
		sb.WriteString(fmt.Sprintf("[%s] ", entry.Logger))
	}
	// Format message.
	sb.WriteString(entry.Message)
	// Add error if present.
	if entry.Error != "" {
		sb.WriteString(fmt.Sprintf(" error=%q", entry.Error))
	}
	// Add extra data in deterministic (sorted) key order.
	if len(entry.ExtraData) > 0 {
		keys := make([]string, 0, len(entry.ExtraData))
		for k := range entry.ExtraData {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		sb.WriteString(" ")
		for i, k := range keys {
			if i > 0 {
				sb.WriteString(" ")
			}
			sb.WriteString(fmt.Sprintf("%s=%v", k, entry.ExtraData[k]))
		}
	}
	return sb.String()
}

338
tools/loganalyzer/stats.go Normal file
View File

@@ -0,0 +1,338 @@
package main
import (
"flag"
"fmt"
"os"
"sort"
"strings"
"time"
)
// LogStats aggregates comprehensive statistics about log file contents.
//
// This structure tracks various metrics including temporal patterns, log levels,
// message types, and server operations to provide insights into server behavior
// and activity patterns. All map fields must be initialized before use (see
// runStats for the canonical construction).
type LogStats struct {
	TotalEntries     int             // Total number of log entries
	EntriesByLevel   map[string]int  // Log level to count
	EntriesByLogger  map[string]int  // Logger name to count
	EntriesByDay     map[string]int  // Date string (YYYY-MM-DD) to count
	EntriesByHour    map[int]int     // Hour of day (0-23) to count
	TopMessages      map[string]int  // Message text (truncated to 80 chars) to count
	FirstEntry       time.Time       // Timestamp of the earliest entry seen
	LastEntry        time.Time       // Timestamp of the latest entry seen
	SaveOperations   int             // Count of save operations
	ObjectBroadcasts int             // Count of object broadcasts
	StageChanges     int             // Count of stage changes
	TerminalLogs     int             // Count of terminal log entries
	UniqueCallers    map[string]bool // Set of unique caller locations
}
// runStats implements the stats command for generating comprehensive log statistics.
//
// The stats command processes the entire log file to collect statistics about:
//   - Overall log volume and time span
//   - Distribution of log levels (info, warn, error, etc.)
//   - Server operation counts (saves, broadcasts, stage changes)
//   - Temporal patterns (activity by day and hour)
//   - Top loggers and message types
//   - Unique code locations generating logs
//
// This provides a high-level overview of server activity and can help identify
// patterns, peak usage times, and potential issues.
//
// Options:
//   - f: Path to log file (default: "erupe.log")
//   - top: Number of top items to show in detailed view (default: 10)
//   - detailed: Show detailed statistics including temporal patterns and top messages
//
// Examples:
//
//	runStats([]string{})                            // Basic statistics
//	runStats([]string{"-detailed"})                 // Full statistics with temporal analysis
//	runStats([]string{"-detailed", "-top", "20"})   // Show top 20 items
func runStats(args []string) {
	fs := flag.NewFlagSet("stats", flag.ExitOnError)
	logFile := fs.String("f", "erupe.log", "Path to log file")
	topN := fs.Int("top", 10, "Show top N messages/loggers")
	detailed := fs.Bool("detailed", false, "Show detailed statistics")
	fs.Parse(args)
	// All map fields must be allocated before the streaming callback writes to them.
	stats := &LogStats{
		EntriesByLevel:  make(map[string]int),
		EntriesByLogger: make(map[string]int),
		EntriesByDay:    make(map[string]int),
		EntriesByHour:   make(map[int]int),
		TopMessages:     make(map[string]int),
		UniqueCallers:   make(map[string]bool),
	}
	// Single streaming pass: every statistic is accumulated per entry.
	err := StreamLogFile(*logFile, func(entry *LogEntry) error {
		stats.TotalEntries++
		// Track first and last entry
		if stats.FirstEntry.IsZero() || entry.Timestamp.Before(stats.FirstEntry) {
			stats.FirstEntry = entry.Timestamp
		}
		if entry.Timestamp.After(stats.LastEntry) {
			stats.LastEntry = entry.Timestamp
		}
		// Count by level
		stats.EntriesByLevel[entry.Level]++
		// Count by logger
		stats.EntriesByLogger[entry.Logger]++
		// Count by day (entries whose timestamp failed to parse are skipped here)
		if !entry.Timestamp.IsZero() {
			day := entry.Timestamp.Format("2006-01-02")
			stats.EntriesByDay[day]++
			// Count by hour of day
			hour := entry.Timestamp.Hour()
			stats.EntriesByHour[hour]++
		}
		// Count message types; truncate to 80 chars so long variants group together
		msg := entry.Message
		if len(msg) > 80 {
			msg = msg[:80] + "..."
		}
		stats.TopMessages[msg]++
		// Track unique callers
		if entry.Caller != "" {
			stats.UniqueCallers[entry.Caller] = true
		}
		// Count specific operations by matching known Erupe server log messages
		if strings.Contains(entry.Message, "Wrote recompressed savedata back to DB") {
			stats.SaveOperations++
		}
		if strings.Contains(entry.Message, "Broadcasting new object") {
			stats.ObjectBroadcasts++
		}
		if strings.Contains(entry.Message, "Sending notification to old stage clients") {
			stats.StageChanges++
		}
		if entry.Message == "SysTerminalLog" {
			stats.TerminalLogs++
		}
		return nil
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error processing log file: %v\n", err)
		os.Exit(1)
	}
	printStats(stats, *topN, *detailed)
}
// printStats displays formatted statistics output.
//
// In basic mode, shows:
//   - Total entries, time range, and average rate
//   - Distribution by log level
//   - Operation counts
//
// In detailed mode, additionally shows:
//   - Top N loggers by volume
//   - Entries by day
//   - Activity distribution by hour of day (with bar chart)
//   - Top N message types
//
// Parameters:
//   - stats: LogStats structure containing collected statistics
//   - topN: Number of top items to display in detailed view
//   - detailed: Whether to show detailed statistics
func printStats(stats *LogStats, topN int, detailed bool) {
	fmt.Printf("=== Erupe Log Statistics ===\n\n")
	// Basic stats
	fmt.Printf("Total Log Entries: %s\n", formatNumber(stats.TotalEntries))
	if !stats.FirstEntry.IsZero() && !stats.LastEntry.IsZero() {
		duration := stats.LastEntry.Sub(stats.FirstEntry)
		fmt.Printf("Time Range: %s to %s\n",
			stats.FirstEntry.Format("2006-01-02 15:04:05"),
			stats.LastEntry.Format("2006-01-02 15:04:05"))
		fmt.Printf("Total Duration: %s\n", formatDuration(duration))
		// Guard against a zero-length span before computing the hourly rate.
		if duration.Hours() > 0 {
			entriesPerHour := float64(stats.TotalEntries) / duration.Hours()
			fmt.Printf("Average Entries/Hour: %.1f\n", entriesPerHour)
		}
	}
	fmt.Println()
	// Log levels, printed in a fixed severity order; absent levels are skipped.
	fmt.Printf("--- Entries by Log Level ---\n")
	levels := []string{"info", "warn", "error", "fatal", "panic", "unknown"}
	for _, level := range levels {
		if count, ok := stats.EntriesByLevel[level]; ok {
			percentage := float64(count) / float64(stats.TotalEntries) * 100
			fmt.Printf(" %-8s: %s (%.1f%%)\n", strings.ToUpper(level), formatNumber(count), percentage)
		}
	}
	fmt.Println()
	// Operation counts
	fmt.Printf("--- Operation Counts ---\n")
	fmt.Printf(" Save Operations: %s\n", formatNumber(stats.SaveOperations))
	fmt.Printf(" Object Broadcasts: %s\n", formatNumber(stats.ObjectBroadcasts))
	fmt.Printf(" Stage Changes: %s\n", formatNumber(stats.StageChanges))
	fmt.Printf(" Terminal Logs: %s\n", formatNumber(stats.TerminalLogs))
	fmt.Printf(" Unique Callers: %s\n", formatNumber(len(stats.UniqueCallers)))
	fmt.Println()
	if detailed {
		// Top loggers
		if len(stats.EntriesByLogger) > 0 {
			fmt.Printf("--- Top %d Loggers ---\n", topN)
			printTopMap(stats.EntriesByLogger, topN, stats.TotalEntries)
			fmt.Println()
		}
		// Entries by day
		if len(stats.EntriesByDay) > 0 {
			fmt.Printf("--- Entries by Day ---\n")
			printDayMap(stats.EntriesByDay)
			fmt.Println()
		}
		// Entries by hour
		if len(stats.EntriesByHour) > 0 {
			fmt.Printf("--- Activity by Hour of Day ---\n")
			printHourDistribution(stats.EntriesByHour, stats.TotalEntries)
			fmt.Println()
		}
		// Top messages
		if len(stats.TopMessages) > 0 {
			fmt.Printf("--- Top %d Messages ---\n", topN)
			printTopMap(stats.TopMessages, topN, stats.TotalEntries)
			fmt.Println()
		}
	}
}
// printTopMap displays the top N items from a map sorted by count.
//
// Each row shows a rank number, the item key (truncated to 60 characters if
// longer), the count with thousand separators, and the percentage of total.
//
// Parameters:
//   - m: Map of items to counts
//   - topN: Maximum number of items to display
//   - total: Total count for calculating percentages
func printTopMap(m map[string]int, topN, total int) {
	type ranked struct {
		label string
		n     int
	}
	rows := make([]ranked, 0, len(m))
	for label, n := range m {
		rows = append(rows, ranked{label, n})
	}
	sort.Slice(rows, func(a, b int) bool {
		return rows[a].n > rows[b].n
	})
	if topN < len(rows) {
		rows = rows[:topN]
	}
	for rank, row := range rows {
		label := row.label
		if len(label) > 60 {
			label = label[:57] + "..."
		}
		pct := float64(row.n) / float64(total) * 100
		fmt.Printf(" %2d. %-60s: %s (%.1f%%)\n", rank+1, label, formatNumber(row.n), pct)
	}
}
// printDayMap displays entries grouped by day in chronological order.
//
// Output format: "YYYY-MM-DD: count". Because the date strings are zero-padded
// ISO dates, lexicographic order is chronological order.
//
// Parameters:
//   - m: Map of date strings (YYYY-MM-DD format) to counts
func printDayMap(m map[string]int) {
	days := make([]string, 0, len(m))
	for day := range m {
		days = append(days, day)
	}
	sort.Strings(days)
	for _, day := range days {
		fmt.Printf(" %s: %s\n", day, formatNumber(m[day]))
	}
}
// printHourDistribution displays log activity by hour of day with a bar chart.
//
// For each hour (0-23), shows:
//   - Hour range (e.g., "14:00 - 14:59")
//   - ASCII bar chart visualization (█ characters, scaled so 100% == 20 chars)
//   - Count with thousand separators
//   - Percentage of total
//
// Hours with no activity are skipped.
//
// Parameters:
//   - m: Map of hours (0-23) to entry counts
//   - total: Total number of entries for percentage calculation
func printHourDistribution(m map[int]int, total int) {
	for hour := 0; hour < 24; hour++ {
		count := m[hour]
		if count == 0 {
			continue
		}
		percentage := float64(count) / float64(total) * 100
		// Scale the bar so 100% fills the 20-char column. The previous
		// one-char-per-percent bar overflowed %-20s whenever a single hour
		// held more than 20% of all entries.
		bar := strings.Repeat("█", int(percentage/5))
		fmt.Printf(" %02d:00 - %02d:59: %-20s %s (%.1f%%)\n",
			hour, hour, bar, formatNumber(count), percentage)
	}
}
// formatNumber formats an integer with thousand separators for readability.
//
// Works for any int value, including negatives and values of a billion or
// more (the previous version emitted the leading group without separators
// once n reached 10 digits).
//
// Examples:
//   - 123        -> "123"
//   - 1234       -> "1,234"
//   - 1234567    -> "1,234,567"
//   - 1234567890 -> "1,234,567,890"
//   - -1234      -> "-1,234"
//
// Parameters:
//   - n: The integer to format
//
// Returns:
//   - A string with comma separators every three digits
func formatNumber(n int) string {
	if n < 0 {
		return "-" + formatNumber(-n)
	}
	if n < 1000 {
		return fmt.Sprintf("%d", n)
	}
	// Recurse on the higher-order groups; each lower group is zero-padded
	// to exactly three digits.
	return formatNumber(n/1000) + fmt.Sprintf(",%03d", n%1000)
}

108
tools/loganalyzer/tail.go Normal file
View File

@@ -0,0 +1,108 @@
package main
import (
"bufio"
"flag"
"fmt"
"os"
"time"
)
// runTail implements the tail command for following log files in real-time.
//
// The tail command mimics the Unix `tail -f` command, displaying the last N lines
// of a log file and then continuously monitoring the file for new entries. This is
// useful for real-time monitoring of server activity.
//
// The command operates in two phases:
//  1. Initial display: Shows the last N matching entries from the file
//  2. Follow mode: Continuously monitors for new lines and displays them as they appear
//
// Both phases support filtering by log level and colorized output.
//
// Options:
//   - f: Path to log file (default: "erupe.log")
//   - n: Number of initial lines to show (default: 10)
//   - follow: Whether to continue following the file (default: true)
//   - level: Filter by log level (info, warn, error, fatal)
//   - color: Colorize output (default: true)
//
// The follow mode polls the file every 100ms for new content. Use Ctrl+C to stop.
//
// Examples:
//
//	runTail([]string{})                              // Show last 10 lines and follow
//	runTail([]string{"-n", "50"})                    // Show last 50 lines and follow
//	runTail([]string{"-level", "error"})             // Only show errors
//	runTail([]string{"-follow=false", "-n", "20"})   // Just show last 20 lines, don't follow
func runTail(args []string) {
	fs := flag.NewFlagSet("tail", flag.ExitOnError)
	logFile := fs.String("f", "erupe.log", "Path to log file")
	lines := fs.Int("n", 10, "Number of initial lines to show")
	follow := fs.Bool("follow", true, "Follow the log file (like tail -f)")
	level := fs.String("level", "", "Filter by log level")
	colorize := fs.Bool("color", true, "Colorize output")
	fs.Parse(args)
	// Phase 1: show the last N (optionally level-filtered) entries.
	if *lines > 0 {
		entries, err := ParseLogFile(*logFile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error reading log file: %v\n", err)
			os.Exit(1)
		}
		var filtered []*LogEntry
		for _, entry := range entries {
			if *level == "" || entry.Level == *level {
				filtered = append(filtered, entry)
			}
		}
		start := len(filtered) - *lines
		if start < 0 {
			start = 0
		}
		for i := start; i < len(filtered); i++ {
			fmt.Println(FormatLogEntry(filtered[i], *colorize))
		}
	}
	if !*follow {
		return
	}
	// Phase 2: follow the file for new content.
	file, err := os.Open(*logFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error opening log file: %v\n", err)
		os.Exit(1)
	}
	defer file.Close()
	// Seek to the end (whence 2 == io.SeekEnd) so only new content is shown.
	if _, err := file.Seek(0, 2); err != nil {
		fmt.Fprintf(os.Stderr, "Error seeking log file: %v\n", err)
		os.Exit(1)
	}
	reader := bufio.NewReader(file)
	fmt.Fprintln(os.Stderr, "Following log file... (Ctrl+C to stop)")
	// ReadString can return data WITHOUT a trailing newline (the writer has
	// not finished the line yet). Those bytes are already consumed from the
	// reader, so they must be accumulated rather than discarded — the old
	// code dropped them, garbling the next line displayed.
	var pending string
	for {
		chunk, err := reader.ReadString('\n')
		pending += chunk
		if err != nil {
			// No complete line yet; poll again shortly.
			time.Sleep(100 * time.Millisecond)
			continue
		}
		line := pending
		pending = ""
		// Strip the delimiter so ParseLogLine sees the bare line.
		if len(line) > 0 && line[len(line)-1] == '\n' {
			line = line[:len(line)-1]
		}
		entry := ParseLogLine(line)
		if entry != nil {
			// Filter by level if specified.
			if *level == "" || entry.Level == *level {
				fmt.Println(FormatLogEntry(entry, *colorize))
			}
		}
	}
}