doc: inline code documentation.

This commit is contained in:
Houmgaor
2025-11-24 18:41:37 +01:00
parent a992c5a603
commit 64cc285fd8
11 changed files with 697 additions and 181 deletions

File diff suppressed because one or more lines are too long

View File

@@ -11,11 +11,28 @@ import (
"go.uber.org/zap"
)
// handleMsgSysCreateStage creates a new stage (room/quest instance).
//
// This is called when a player:
// - Posts a quest
// - Creates a private room
// - Initiates any activity requiring a new stage instance
//
// The handler:
// 1. Checks if stage already exists (return failure if it does)
// 2. Creates new stage with the requesting session as host
// 3. Sets max player count from packet
// 4. Adds stage to server's stage map
// 5. Responds with success/failure
//
// Note: This only creates the stage; the player must call MSG_SYS_ENTER_STAGE
// to actually enter it after creation.
func handleMsgSysCreateStage(s *Session, p mhfpacket.MHFPacket) {
pkt := p.(*mhfpacket.MsgSysCreateStage)
s.server.Lock()
defer s.server.Unlock()
if _, exists := s.server.stages[pkt.StageID]; exists {
// Stage already exists, cannot create duplicate
doAckSimpleFail(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
} else {
stage := NewStage(pkt.StageID)
@@ -28,6 +45,27 @@ func handleMsgSysCreateStage(s *Session, p mhfpacket.MHFPacket) {
// handleMsgSysStageDestruct handles MSG_SYS_STAGE_DESTRUCT.
//
// Intentionally a no-op: the packet is accepted and ignored. Stage teardown
// is presumably performed elsewhere (e.g. when the last session leaves a
// stage) — TODO(review): confirm where stage cleanup actually occurs.
func handleMsgSysStageDestruct(s *Session, p mhfpacket.MHFPacket) {}
// doStageTransfer handles the common logic for entering/moving to a stage.
//
// This is a helper function called by handleMsgSysEnterStage and handleMsgSysMoveStage.
// It performs the full stage entry process:
//
// 1. Find or create the target stage
// 2. Add session to the stage's client map
// 3. Remove session from previous stage (if any)
// 4. Update session's stage pointers
// 5. Send cleanup command to client (clear old stage objects)
// 6. Send acknowledgment
// 7. Synchronize existing stage objects to the new player
// 8. Notify other players in the stage about new player
//
// If the stage doesn't exist, it creates it automatically (for persistent town stages).
// For quest stages, MSG_SYS_CREATE_STAGE should be called first.
//
// Parameters:
// - s: The session entering the stage
// - ackHandle: The ack handle to respond to
// - stageID: The stage ID to enter
func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
s.server.Lock()
stage, exists := s.server.stages[stageID]
@@ -37,7 +75,7 @@ func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
stage.Lock()
stage.clients[s] = s.charID
stage.Unlock()
} else { // Create new stage object
} else { // Create new stage object (for persistent stages like towns)
s.server.Lock()
s.server.stages[stageID] = NewStage(stageID)
stage = s.server.stages[stageID]
@@ -48,21 +86,21 @@ func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
stage.Unlock()
}
// Ensure this session no longer belongs to reservations.
// Ensure this session no longer belongs to their previous stage
if s.stage != nil {
removeSessionFromStage(s)
}
// Save our new stage ID and pointer to the new stage itself.
// Save our new stage ID and pointer to the new stage itself
s.Lock()
s.stageID = stageID
s.stage = s.server.stages[stageID]
s.Unlock()
// Tell the client to cleanup its current stage objects.
// Tell the client to cleanup its current stage objects
s.QueueSendMHF(&mhfpacket.MsgSysCleanupObject{})
// Confirm the stage entry.
// Confirm the stage entry
doAckSimpleSucceed(s, ackHandle, []byte{0x00, 0x00, 0x00, 0x00})
var temp mhfpacket.MHFPacket

View File

@@ -5,10 +5,49 @@ import (
"erupe-ce/network/mhfpacket"
)
// handlerFunc is the signature shared by all packet handler functions.
//
// A handler is invoked when a packet with a matching opcode is received.
// It processes the packet and typically responds using the session's Queue methods.
//
// Parameters:
//   - s: The session that received the packet (contains player state, connection)
//   - p: The parsed packet (must be type-asserted to the specific packet type)
//
// Handler functions should:
//  1. Type-assert the packet to its specific type
//  2. Validate the packet data and session state
//  3. Perform the requested operation (database query, state change, etc.)
//  4. Send a response using doAckBufSucceed/Fail or s.QueueSendMHF
//  5. Handle errors gracefully (log and send an error response to the client)
type handlerFunc func(s *Session, p mhfpacket.MHFPacket)
// handlerTable maps packet opcodes to their handler functions.
//
// This is the central routing table for all incoming packets. When a packet
// arrives, the session's handlePacketGroup() function:
//  1. Reads the opcode from the packet header
//  2. Looks up the handler in this table
//  3. Calls the handler with the session and parsed packet
//
// The table is populated in init() and contains several hundred packet
// handlers covering:
//   - System packets (MSG_SYS_*): Connection, stages, objects, semaphores
//   - MHF packets (MSG_MHF_*): Game features (quests, guilds, items, events)
//   - CA packets (MSG_CA_*): Caravan system
//
// If a packet has no registered handler, it's ignored (logged in dev mode).
var handlerTable map[network.PacketID]handlerFunc
// init registers all packet handlers in the handlerTable.
//
// Handlers are organized by feature:
// - handlers_*.go files implement related handler functions
// - This init function registers them all in the central table
//
// Adding a new handler:
// 1. Implement handleMsgYourPacket() in appropriate handlers_*.go file
// 2. Add registration here: handlerTable[network.MSG_YOUR_PACKET] = handleMsgYourPacket
// 3. Define the packet structure in network/mhfpacket/msg_*.go
func init() {
handlerTable = make(map[network.PacketID]handlerFunc)
handlerTable[network.MSG_HEAD] = handleMsgHead

View File

@@ -1,3 +1,29 @@
// Package channelserver implements the Monster Hunter Frontier channel server.
//
// The channel server is the core gameplay component that handles actual game sessions,
// quests, player interactions, and all in-game activities. It uses a stage-based
// architecture where players move between stages (game areas/rooms) and interact
// with other players in real-time.
//
// Architecture Overview:
//
// The channel server manages three primary concepts:
// - Sessions: Individual player connections with their state and packet queues
// - Stages: Game rooms/areas where players interact (towns, quests, lobbies)
// - Semaphores: Resource locks for coordinating multiplayer activities (quests, events)
//
// Multiple channel servers can run simultaneously on different ports, allowing
// horizontal scaling and separation of different world types (Newbie, Normal, etc).
//
// Thread Safety:
//
// This package extensively uses goroutines and shared state. All shared resources
// are protected by mutexes. When modifying code, always consider thread safety:
// - Server-level: s.Lock() / s.Unlock() for session map
// - Stage-level: s.stagesLock.RLock() / s.stagesLock.Lock() for stage map
// - Session-level: session.Lock() / session.Unlock() for session state
//
// Use 'go test -race ./...' to detect race conditions during development.
package channelserver
import (
@@ -16,91 +42,120 @@ import (
"go.uber.org/zap"
)
// Config struct allows configuring the server.
// Config holds configuration parameters for creating a new channel server.
type Config struct {
ID uint16
Logger *zap.Logger
DB *sqlx.DB
DiscordBot *discordbot.DiscordBot
ErupeConfig *config.Config
Name string
Enable bool
ID uint16 // Channel server ID (unique identifier)
Logger *zap.Logger // Logger instance for this channel server
DB *sqlx.DB // Database connection pool
DiscordBot *discordbot.DiscordBot // Optional Discord bot for chat integration
ErupeConfig *config.Config // Global Erupe configuration
Name string // Display name for the server (shown in broadcasts)
Enable bool // Whether this server is enabled
}
// Map key type for a user binary part.
// userBinaryPartID is a composite key for identifying a specific part of a user's binary data.
// User binary data is split into multiple indexed parts and stored separately.
type userBinaryPartID struct {
charID uint32
index uint8
charID uint32 // Character ID who owns this binary data
index uint8 // Part index (binary data is chunked into multiple parts)
}
// Server is a MHF channel server.
// Server represents a Monster Hunter Frontier channel server instance.
//
// The Server manages all active player sessions, game stages, and shared resources.
// It runs two main goroutines: one for accepting connections and one for managing
// the session lifecycle.
//
// Thread Safety:
// Server embeds sync.Mutex for protecting the sessions map. Use Lock()/Unlock()
// when reading or modifying s.sessions. The stages map uses a separate RWMutex
// (stagesLock) to allow concurrent reads during normal gameplay.
type Server struct {
sync.Mutex
Channels []*Server
ID uint16
GlobalID string
IP string
Port uint16
logger *zap.Logger
db *sqlx.DB
erupeConfig *config.Config
acceptConns chan net.Conn
deleteConns chan net.Conn
sessions map[net.Conn]*Session
listener net.Listener // Listener that is created when Server.Start is called.
isShuttingDown bool
sync.Mutex // Protects sessions map and isShuttingDown flag
stagesLock sync.RWMutex
stages map[string]*Stage
// Server identity and network configuration
Channels []*Server // Reference to all channel servers (for world broadcasts)
ID uint16 // This server's ID
GlobalID string // Global identifier string
IP string // Server IP address
Port uint16 // Server listening port
// Used to map different languages
dict map[string]string
// Core dependencies
logger *zap.Logger // Logger instance
db *sqlx.DB // Database connection pool
erupeConfig *config.Config // Global configuration
// UserBinary
userBinaryPartsLock sync.RWMutex
userBinaryParts map[userBinaryPartID][]byte
// Connection management
acceptConns chan net.Conn // Channel for new accepted connections
deleteConns chan net.Conn // Channel for connections to be cleaned up
sessions map[net.Conn]*Session // Active sessions keyed by connection
listener net.Listener // TCP listener (created when Server.Start is called)
isShuttingDown bool // Shutdown flag to stop goroutines gracefully
// Semaphore
semaphoreLock sync.RWMutex
semaphore map[string]*Semaphore
semaphoreIndex uint32
// Stage (game room) management
stagesLock sync.RWMutex // Protects stages map (RWMutex for concurrent reads)
stages map[string]*Stage // Active stages keyed by stage ID string
// Discord chat integration
discordBot *discordbot.DiscordBot
// Localization
dict map[string]string // Language string mappings for server messages
name string
// User binary data storage
// Binary data is player-specific custom data that the client stores on the server
userBinaryPartsLock sync.RWMutex // Protects userBinaryParts map
userBinaryParts map[userBinaryPartID][]byte // Chunked binary data by character
raviente *Raviente
// Semaphore (multiplayer coordination) management
semaphoreLock sync.RWMutex // Protects semaphore map and semaphoreIndex
semaphore map[string]*Semaphore // Active semaphores keyed by semaphore ID
semaphoreIndex uint32 // Auto-incrementing ID for new semaphores (starts at 7)
// Optional integrations
discordBot *discordbot.DiscordBot // Discord bot for chat relay (nil if disabled)
name string // Server display name (used in chat messages)
// Special event system: Raviente (large-scale multiplayer raid)
raviente *Raviente // Raviente event state and coordination
}
// Raviente manages the Raviente raid event, a large-scale multiplayer encounter.
//
// Raviente is a special monster that requires coordination between many players
// across multiple phases. This struct tracks registration, event state, and
// support/assistance data for the active Raviente encounter.
type Raviente struct {
sync.Mutex
sync.Mutex // Protects all Raviente data during concurrent access
register *RavienteRegister
state *RavienteState
support *RavienteSupport
register *RavienteRegister // Player registration and event timing
state *RavienteState // Current state of the Raviente encounter
support *RavienteSupport // Support/assistance tracking data
}
// RavienteRegister tracks player registration and timing for Raviente events.
type RavienteRegister struct {
nextTime uint32
startTime uint32
postTime uint32
killedTime uint32
ravienteType uint32
maxPlayers uint32
carveQuest uint32
register []uint32
nextTime uint32 // Timestamp for next Raviente event
startTime uint32 // Event start timestamp
postTime uint32 // Event post-completion timestamp
killedTime uint32 // Timestamp when Raviente was defeated
ravienteType uint32 // Raviente variant (2=Berserk, 3=Extreme, 4=Extreme Limited, 5=Berserk Small)
maxPlayers uint32 // Maximum players allowed (determines scaling)
carveQuest uint32 // Quest ID for carving phase after defeat
register []uint32 // List of registered player IDs (up to 5 slots)
}
// RavienteState holds the dynamic state data for an active Raviente encounter.
// The state array contains 29 uint32 values tracking encounter progress.
type RavienteState struct {
stateData []uint32
stateData []uint32 // Raviente encounter state (29 values)
}
// RavienteSupport tracks support and assistance data for Raviente encounters.
// The support array contains 25 uint32 values for coordination features.
type RavienteSupport struct {
supportData []uint32
supportData []uint32 // Support/assistance data (25 values)
}
// Set up the Raviente variables for the server
// NewRaviente creates and initializes a new Raviente event manager with default values.
// All state and support arrays are initialized to zero, ready for a new event.
func NewRaviente() *Raviente {
ravienteRegister := &RavienteRegister{
nextTime: 0,
@@ -125,6 +180,15 @@ func NewRaviente() *Raviente {
return raviente
}
// GetRaviMultiplier calculates the difficulty multiplier for Raviente based on player count.
//
// Raviente scales its difficulty based on the number of active participants. If there
// are fewer players than the minimum threshold, the encounter becomes easier by returning
// a multiplier < 1. Returns 1.0 for full groups, or 0 if the semaphore doesn't exist.
//
// Minimum player thresholds:
// - Large Raviente (maxPlayers > 8): 24 players minimum
// - Small Raviente (maxPlayers <= 8): 4 players minimum
func (r *Raviente) GetRaviMultiplier(s *Server) float64 {
raviSema := getRaviSemaphore(s)
if raviSema != nil {
@@ -142,7 +206,19 @@ func (r *Raviente) GetRaviMultiplier(s *Server) float64 {
return 0
}
// NewServer creates a new Server type.
// NewServer creates and initializes a new channel server with the given configuration.
//
// The server is initialized with default persistent stages (town areas that always exist):
// - sl1Ns200p0a0u0: Mezeporta (main town)
// - sl1Ns211p0a0u0: Rasta bar
// - sl1Ns260p0a0u0: Pallone Caravan
// - sl1Ns262p0a0u0: Pallone Guest House 1st Floor
// - sl1Ns263p0a0u0: Pallone Guest House 2nd Floor
// - sl2Ns379p0a0u0: Diva fountain / prayer fountain
// - sl1Ns462p0a0u0: MezFes (festival area)
//
// Additional dynamic stages are created by players when they create quests or rooms.
// The semaphore index starts at 7 to avoid reserved IDs 0-6.
func NewServer(config *Config) *Server {
s := &Server{
ID: config.ID,
@@ -187,7 +263,16 @@ func NewServer(config *Config) *Server {
return s
}
// Start starts the server in a new goroutine.
// Start begins listening for connections and starts the server's main goroutines.
//
// This method:
// 1. Creates a TCP listener on the configured port
// 2. Launches acceptClients() goroutine to accept new connections
// 3. Launches manageSessions() goroutine to handle session lifecycle
// 4. Optionally starts Discord chat integration
//
// Returns an error if the listener cannot be created (e.g., port in use).
// The server runs asynchronously after Start() returns successfully.
func (s *Server) Start() error {
l, err := net.Listen("tcp", fmt.Sprintf(":%d", s.Port))
if err != nil {
@@ -206,7 +291,15 @@ func (s *Server) Start() error {
return nil
}
// Shutdown tries to shut down the server gracefully.
// Shutdown gracefully stops the server and all its goroutines.
//
// This method:
// 1. Sets the shutdown flag to stop accepting new connections
// 2. Closes the TCP listener (causes acceptClients to exit)
// 3. Closes the acceptConns channel (signals manageSessions to exit)
//
// Existing sessions are not forcibly disconnected but will eventually timeout
// or disconnect naturally. For a complete shutdown, wait for all sessions to close.
func (s *Server) Shutdown() {
s.Lock()
s.isShuttingDown = true
@@ -267,7 +360,17 @@ func (s *Server) manageSessions() {
}
}
// BroadcastMHF queues a MHFPacket to be sent to all sessions.
// BroadcastMHF sends a packet to all active sessions on this channel server.
//
// The packet is built individually for each session to handle per-session state
// (like client version differences). Packets are queued in a non-blocking manner,
// so if a session's queue is full, the packet is dropped for that session only.
//
// Parameters:
// - pkt: The MHFPacket to broadcast to all sessions
// - ignoredSession: Optional session to exclude from the broadcast (typically the sender)
//
// Thread Safety: This method locks the server's session map during iteration.
func (s *Server) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) {
// Broadcast the data.
s.Lock()
@@ -289,6 +392,16 @@ func (s *Server) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session)
}
}
// WorldcastMHF broadcasts a packet to all channel servers (world-wide broadcast).
//
// This is used for server-wide announcements like Raviente events that should be
// visible to all players across all channels. The packet is sent to every channel
// server except the one specified in ignoredChannel.
//
// Parameters:
// - pkt: The MHFPacket to broadcast across all channels
// - ignoredSession: Optional session to exclude from broadcasts
// - ignoredChannel: Optional channel server to skip (typically the originating channel)
func (s *Server) WorldcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session, ignoredChannel *Server) {
for _, c := range s.Channels {
if c == ignoredChannel {
@@ -298,7 +411,13 @@ func (s *Server) WorldcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session,
}
}
// BroadcastChatMessage broadcasts a simple chat message to all the sessions.
// BroadcastChatMessage sends a simple chat message to all sessions on this server.
//
// The message appears as a system message with the server's configured name as the sender.
// This is typically used for server announcements, maintenance notifications, or events.
//
// Parameters:
// - message: The text message to broadcast to all players
func (s *Server) BroadcastChatMessage(message string) {
bf := byteframe.NewByteFrame()
bf.SetLE()

View File

@@ -7,27 +7,62 @@ import (
"sync"
)
// Stage holds stage-specific information
// Semaphore is a multiplayer coordination mechanism for quests and events.
//
// Despite the name, Semaphore is NOT an OS/library synchronization primitive (such as golang.org/x/sync/semaphore).
// Instead, it's a game-specific resource lock that coordinates multiplayer activities where:
// - Players must acquire a semaphore before participating
// - A limited number of participants are allowed (maxPlayers)
// - The semaphore tracks both active and reserved participants
//
// Use Cases:
// - Quest coordination: Ensures quest party size limits are enforced
// - Event coordination: Raviente, VS Tournament, Diva Defense
// - Global resources: Prevents multiple groups from starting conflicting events
//
// Semaphore vs Stage:
// - Stages are spatial (game rooms, areas). Players in a stage can see each other.
// - Semaphores are logical (coordination locks). Players in a semaphore are
// participating in the same activity but may be in different stages.
//
// Example: Raviente Event
// - Players acquire the Raviente semaphore to register for the event
// - Multiple quest stages exist (preparation, phase 1, phase 2, carving)
// - All participants share the same semaphore across different stages
// - The semaphore enforces the 32-player limit across all stages
//
// Thread Safety:
// Semaphore embeds sync.RWMutex. Use RLock for reads and Lock for writes.
type Semaphore struct {
sync.RWMutex
sync.RWMutex // Protects semaphore state during concurrent access
// Stage ID string
id_semaphore string
// Semaphore identity
id_semaphore string // Semaphore ID string (identifies the resource/activity)
id uint32 // Numeric ID for client communication (auto-generated, starts at 7)
id uint32
// Active participants
clients map[*Session]uint32 // Sessions actively using this semaphore -> character ID
// Map of session -> charID.
// These are clients that are CURRENTLY in the stage
clients map[*Session]uint32
// Reserved slots
// Players who have acquired the semaphore but may not be actively in the stage yet.
// The value is always nil; only the key (charID) matters. This is a set implementation.
reservedClientSlots map[uint32]interface{} // Character ID -> nil (set of reserved IDs)
// Map of charID -> interface{}, only the key is used, value is always nil.
reservedClientSlots map[uint32]interface{}
// Max Players for Semaphore
maxPlayers uint16
// Capacity
maxPlayers uint16 // Maximum concurrent participants (e.g., 4 for quests, 32 for Raviente)
}
// NewStage creates a new stage with initialized values.
// NewSemaphore creates and initializes a new Semaphore for coordinating an activity.
//
// The semaphore is assigned an auto-incrementing ID from the server's semaphoreIndex.
// IDs 0-6 are reserved, so the first semaphore gets ID 7.
//
// Parameters:
// - s: The server (used to generate unique semaphore ID)
// - ID: Semaphore ID string (identifies the activity/resource)
// - MaxPlayers: Maximum participants allowed
//
// Returns a new Semaphore ready for client acquisition.
func NewSemaphore(s *Server, ID string, MaxPlayers uint16) *Semaphore {
sema := &Semaphore{
id_semaphore: ID,
@@ -55,7 +90,22 @@ func (s *Semaphore) BroadcastRavi(pkt mhfpacket.MHFPacket) {
}
}
// BroadcastMHF queues a MHFPacket to be sent to all sessions in the stage.
// BroadcastMHF sends a packet to all active participants in the semaphore.
//
// This is used for event-wide announcements that all participants need to see,
// regardless of which stage they're currently in. Examples:
// - Raviente phase changes
// - Tournament updates
// - Event completion notifications
//
// Only active clients (in the clients map) receive broadcasts. Reserved clients
// who haven't fully joined yet are excluded.
//
// Parameters:
// - pkt: The MHFPacket to broadcast to all participants
// - ignoredSession: Optional session to exclude from broadcast
//
// Thread Safety: Caller should hold semaphore lock when iterating clients.
func (s *Semaphore) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) {
// Broadcast the data.
for session := range s.clients {

View File

@@ -18,54 +18,85 @@ import (
"go.uber.org/zap"
)
// packet is an internal wrapper for queued outbound packets.
type packet struct {
data []byte
nonBlocking bool
data []byte // Raw packet bytes to send
nonBlocking bool // If true, drop packet if queue is full instead of blocking
}
// Session holds state for the channel server connection.
// Session represents an active player connection to the channel server.
//
// Each Session manages a single player's connection lifecycle, including:
// - Packet send/receive loops running in separate goroutines
// - Current stage (game area) and stage movement history
// - Character state (ID, courses, guild, etc.)
// - Mail system state
// - Quest/semaphore participation
//
// Lifecycle:
// 1. Created by NewSession() when a player connects
// 2. Started with Start() which launches send/recv goroutines
// 3. Processes packets through handlePacketGroup() -> handler functions
// 4. Cleaned up when connection closes or times out (30 second inactivity)
//
// Thread Safety:
// Session embeds sync.Mutex to protect mutable state. Most handler functions
// acquire the session lock when modifying session fields. The packet queue
// (sendPackets channel) is safe for concurrent access.
type Session struct {
sync.Mutex
logger *zap.Logger
server *Server
rawConn net.Conn
cryptConn *network.CryptConn
sendPackets chan packet
clientContext *clientctx.ClientContext
lastPacket time.Time
sync.Mutex // Protects session state during concurrent handler execution
userEnteredStage bool // If the user has entered a stage before
stageID string
stage *Stage
reservationStage *Stage // Required for the stateful MsgSysUnreserveStage packet.
stagePass string // Temporary storage
prevGuildID uint32 // Stores the last GuildID used in InfoGuild
charID uint32
logKey []byte
sessionStart int64
courses []mhfcourse.Course
token string
kqf []byte
kqfOverride bool
// Core connection and logging
logger *zap.Logger // Logger with connection address
server *Server // Parent server reference
rawConn net.Conn // Underlying TCP connection
cryptConn *network.CryptConn // Encrypted connection wrapper
sendPackets chan packet // Outbound packet queue (buffered, size 20)
clientContext *clientctx.ClientContext // Client version and capabilities
lastPacket time.Time // Timestamp of last received packet (for timeout detection)
semaphore *Semaphore // Required for the stateful MsgSysUnreserveStage packet.
// Stage (game area) state
userEnteredStage bool // Whether player has entered any stage during this session
stageID string // Current stage ID string (e.g., "sl1Ns200p0a0u0")
stage *Stage // Pointer to current stage object
reservationStage *Stage // Stage reserved for quest (used by unreserve packet)
stagePass string // Temporary password storage for password-protected stages
stageMoveStack *stringstack.StringStack // Navigation history for "back" functionality
// A stack containing the stage movement history (push on enter/move, pop on back)
stageMoveStack *stringstack.StringStack
// Player identity and state
charID uint32 // Character ID for this session
Name string // Character name (for debugging/logging)
prevGuildID uint32 // Last guild ID queried (cached for InfoGuild)
token string // Authentication token from sign server
logKey []byte // Logging encryption key
sessionStart int64 // Session start timestamp (Unix time)
courses []mhfcourse.Course // Active Monster Hunter courses (buffs/subscriptions)
kqf []byte // Key Quest Flags (quest progress tracking)
kqfOverride bool // Whether KQF is being overridden
// Accumulated index used for identifying mail for a client
// I'm not certain why this is used, but since the client is sending it
// I want to rely on it for now as it might be important later.
mailAccIndex uint8
// Contains the mail list that maps accumulated indexes to mail IDs
mailList []int
// Quest/event coordination
semaphore *Semaphore // Semaphore for quest/event participation (if in a coordinated activity)
// For Debugging
Name string
closed bool
// Mail system state
// The mail system uses an accumulated index system where the client tracks
// mail by incrementing indices rather than direct mail IDs
mailAccIndex uint8 // Current accumulated mail index for this session
mailList []int // Maps accumulated indices to actual mail IDs
// Connection state
closed bool // Whether connection has been closed (prevents double-cleanup)
}
// NewSession creates a new Session type.
// NewSession creates and initializes a new Session for an incoming connection.
//
// The session is created with:
// - A logger tagged with the connection's remote address
// - An encrypted connection wrapper
// - A buffered packet send queue (size 20)
// - Initialized stage movement stack for navigation
// - Session start time set to current time
//
// After creation, call Start() to begin processing packets.
func NewSession(server *Server, conn net.Conn) *Session {
s := &Session{
logger: server.logger.Named(conn.RemoteAddr().String()),
@@ -81,7 +112,17 @@ func NewSession(server *Server, conn net.Conn) *Session {
return s
}
// Start starts the session packet send and recv loop(s).
// Start begins the session's packet processing by launching send and receive goroutines.
//
// This method spawns two long-running goroutines:
// 1. sendLoop(): Continuously sends queued packets to the client
// 2. recvLoop(): Continuously receives and processes packets from the client
//
// The receive loop handles packet parsing, routing to handlers, and recursive
// packet group processing (when multiple packets arrive in one read).
//
// Both loops run until the connection closes or times out. Unlike the sign and
// entrance servers, the channel server does NOT expect an 8-byte NULL initialization.
func (s *Session) Start() {
go func() {
s.logger.Debug("New connection", zap.String("RemoteAddr", s.rawConn.RemoteAddr().String()))
@@ -92,7 +133,19 @@ func (s *Session) Start() {
}()
}
// QueueSend queues a packet (raw []byte) to be sent.
// QueueSend queues a packet for transmission to the client (blocking).
//
// This method:
// 1. Logs the outbound packet (if dev mode is enabled)
// 2. Attempts to enqueue the packet to the send channel
// 3. If the queue is full, flushes non-blocking packets and retries
//
// Blocking vs Non-blocking:
// This is a blocking send - if the queue fills, it will flush non-blocking
// packets (broadcasts, non-critical messages) to make room for this packet.
// Use QueueSendNonBlocking() for packets that can be safely dropped.
//
// Thread Safety: Safe for concurrent calls from multiple goroutines.
func (s *Session) QueueSend(data []byte) {
s.logMessage(binary.BigEndian.Uint16(data[0:2]), data, "Server", s.Name)
select {
@@ -114,7 +167,18 @@ func (s *Session) QueueSend(data []byte) {
}
}
// QueueSendNonBlocking queues a packet (raw []byte) to be sent, dropping the packet entirely if the queue is full.
// QueueSendNonBlocking queues a packet for transmission (non-blocking, lossy).
//
// Unlike QueueSend(), this method drops the packet immediately if the send queue
// is full. This is used for broadcast messages, stage updates, and other packets
// where occasional packet loss is acceptable (client will re-sync or request again).
//
// Use cases:
// - Stage broadcasts (player movement, chat)
// - Server-wide announcements
// - Non-critical status updates
//
// Thread Safety: Safe for concurrent calls from multiple goroutines.
func (s *Session) QueueSendNonBlocking(data []byte) {
select {
case s.sendPackets <- packet{data, true}:
@@ -124,7 +188,15 @@ func (s *Session) QueueSendNonBlocking(data []byte) {
}
}
// QueueSendMHF queues a MHFPacket to be sent.
// QueueSendMHF queues a structured MHFPacket for transmission to the client.
//
// This is a convenience method that:
// 1. Creates a byteframe and writes the packet opcode
// 2. Calls the packet's Build() method to serialize its data
// 3. Queues the resulting bytes using QueueSend()
//
// The packet is built with the session's clientContext, allowing version-specific
// packet formatting when needed.
func (s *Session) QueueSendMHF(pkt mhfpacket.MHFPacket) {
// Make the header
bf := byteframe.NewByteFrame()
@@ -137,7 +209,15 @@ func (s *Session) QueueSendMHF(pkt mhfpacket.MHFPacket) {
s.QueueSend(bf.Data())
}
// QueueAck is a helper function to queue an MSG_SYS_ACK with the given ack handle and data.
// QueueAck sends an acknowledgment packet with optional response data.
//
// Many client packets include an "ack handle" field - a unique identifier the client
// uses to match responses to requests. This method constructs and queues a MSG_SYS_ACK
// packet containing the ack handle and response data.
//
// Parameters:
// - ackHandle: The ack handle from the original client packet
// - data: Response payload bytes (can be empty for simple acks)
func (s *Session) QueueAck(ackHandle uint32, data []byte) {
bf := byteframe.NewByteFrame()
bf.WriteUint16(uint16(network.MSG_SYS_ACK))

View File

@@ -7,49 +7,94 @@ import (
"erupe-ce/network/mhfpacket"
)
// Object holds infomation about a specific object.
// Object represents a placeable object in a stage (e.g., ballista, bombs, traps).
//
// Objects are spawned by players during quests and can be interacted with by
// other players in the same stage. Each object has an owner, position, and
// unique ID for client-server synchronization.
type Object struct {
sync.RWMutex
id uint32
ownerCharID uint32
x, y, z float32
sync.RWMutex // Protects object state during updates
id uint32 // Unique object ID (see NextObjectID for ID generation)
ownerCharID uint32 // Character ID of the player who placed this object
x, y, z float32 // 3D position coordinates
}
// stageBinaryKey is a struct used as a map key for identifying a stage binary part.
// stageBinaryKey is a composite key for identifying a specific piece of stage binary data.
//
// Stage binary data is custom game state that the stage host (quest leader) sets
// and the server echoes to other clients. It's used for quest state, monster HP,
// environmental conditions, etc. The data is keyed by two ID bytes.
type stageBinaryKey struct {
id0 uint8
id1 uint8
id0 uint8 // First binary data identifier
id1 uint8 // Second binary data identifier
}
// Stage holds stage-specific information
// Stage represents a game room/area where players interact.
//
// Stages are the core spatial concept in Monster Hunter Frontier. They represent:
// - Town areas (Mezeporta, Pallone, etc.) - persistent, always exist
// - Quest instances - created dynamically when a player starts a quest
// - Private rooms - password-protected player gathering areas
//
// Stage Lifecycle:
// 1. Created via NewStage() or MSG_SYS_CREATE_STAGE packet
// 2. Players enter via MSG_SYS_ENTER_STAGE or MSG_SYS_MOVE_STAGE
// 3. Stage host manages state via binary data packets
// 4. Destroyed via MSG_SYS_STAGE_DESTRUCT when empty or quest completes
//
// Client Participation:
// There are two types of client participation:
// - Active clients (in clients map): Currently in the stage, receive broadcasts
// - Reserved slots (in reservedClientSlots): Quest participants who haven't
// entered yet (e.g., loading screen, preparing). They hold a slot but don't
// receive stage broadcasts until they fully enter.
//
// Thread Safety:
// Stage embeds sync.RWMutex. Use RLock for reads (broadcasts, queries) and
// Lock for writes (entering, leaving, state changes).
type Stage struct {
sync.RWMutex
sync.RWMutex // Protects all stage state during concurrent access
// Stage ID string
id string
// Stage identity
id string // Stage ID string (e.g., "sl1Ns200p0a0u0" for Mezeporta)
// Objects
objects map[uint32]*Object
objectIndex uint8
// Objects in the stage (ballistas, bombs, traps, etc.)
objects map[uint32]*Object // Active objects keyed by object ID
objectIndex uint8 // Auto-incrementing index for object ID generation
// Map of session -> charID.
// These are clients that are CURRENTLY in the stage
clients map[*Session]uint32
// Active participants
clients map[*Session]uint32 // Sessions currently in stage -> their character ID
// Map of charID -> bool, key represents whether they are ready
// These are clients that aren't in the stage, but have reserved a slot (for quests, etc).
reservedClientSlots map[uint32]bool
// Reserved slots for quest participants
// Map of charID -> ready status. These players have reserved a slot but
// haven't fully entered yet (e.g., still loading, in preparation screen)
reservedClientSlots map[uint32]bool // Character ID -> ready flag
// These are raw binary blobs that the stage owner sets,
// other clients expect the server to echo them back in the exact same format.
rawBinaryData map[stageBinaryKey][]byte
// Stage binary data
// Raw binary blobs set by the stage host (quest leader) that track quest state.
// The server stores and echoes this data to clients verbatim. Used for:
// - Monster HP and status
// - Environmental state (time remaining, weather)
// - Quest objectives and progress
rawBinaryData map[stageBinaryKey][]byte // Binary state keyed by (id0, id1)
host *Session
maxPlayers uint16
password string
// Stage settings
host *Session // Stage host (quest leader, room creator)
maxPlayers uint16 // Maximum players allowed (default 4)
password string // Password for private stages (empty if public)
}
// NewStage creates a new stage with intialized values.
// NewStage creates and initializes a new Stage with the given ID.
//
// The stage is created with:
// - Empty client and reserved slot maps
// - Empty object map with objectIndex starting at 0
// - Empty binary data map
// - Default max players set to 4 (standard quest party size)
// - No password (public stage)
//
// For persistent town stages, this is called during server initialization.
// For dynamic quest stages, this is called when a player creates a quest.
func NewStage(ID string) *Stage {
s := &Stage{
id: ID,
@@ -63,7 +108,24 @@ func NewStage(ID string) *Stage {
return s
}
// BroadcastMHF queues a MHFPacket to be sent to all sessions in the stage.
// BroadcastMHF sends a packet to all players currently in the stage.
//
// This method is used for stage-local events like:
// - Player chat messages within the stage
// - Monster state updates
// - Object placement/removal notifications
// - Quest events visible only to stage participants
//
// The packet is built individually for each client to support version-specific
// formatting. Packets are sent non-blocking (dropped if queue full).
//
// Reserved clients (those who haven't fully entered) do NOT receive broadcasts.
//
// Parameters:
// - pkt: The MHFPacket to broadcast to stage participants
// - ignoredSession: Optional session to exclude (typically the sender)
//
// Thread Safety: This method holds the stage lock during iteration.
func (s *Stage) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) {
s.Lock()
defer s.Unlock()
@@ -96,6 +158,18 @@ func (s *Stage) isQuest() bool {
return len(s.reservedClientSlots) > 0
}
// NextObjectID generates the next available object ID for this stage.
//
// Object IDs have special constraints due to client limitations:
// - Index 0 does not update position correctly (avoided)
// - Index 127 does not update position correctly (avoided)
// - Indices > 127 do not replicate correctly across clients (avoided)
//
// The ID is generated by packing bytes into a uint32 in a specific format
// expected by the client. The objectIndex cycles from 1-126 to stay within
// valid bounds.
//
// Thread Safety: Caller must hold stage lock when calling this method.
func (s *Stage) NextObjectID() uint32 {
s.objectIndex = s.objectIndex + 1
// Objects beyond 127 do not duplicate correctly