doc: inline code documentation.

This commit is contained in:
Houmgaor
2025-11-24 18:41:37 +01:00
parent a992c5a603
commit 64cc285fd8
11 changed files with 697 additions and 181 deletions

View File

@@ -1,3 +1,50 @@
// Package mhfpacket provides Monster Hunter Frontier packet definitions and interfaces.
//
// This package contains:
// - MHFPacket interface: The common interface all packets implement
// - 400+ packet type definitions in msg_*.go files
// - Packet parsing (client -> server) and building (server -> client) logic
// - Opcode-to-packet-type mapping via FromOpcode()
//
// Packet Structure:
//
// MHF packets follow this wire format:
// [2 bytes: Opcode][N bytes: Packet-specific data][2 bytes: Footer 0x00 0x10]
//
// Each packet type defines its own structure matching the binary format expected
// by the Monster Hunter Frontier client.
//
// Implementing a New Packet:
//
// 1. Create msg_mhf_your_packet.go with packet struct
// 2. Implement Parse() to read data from ByteFrame
// 3. Implement Build() to write data to ByteFrame
// 4. Implement Opcode() to return the packet's ID
// 5. Register in opcodeToPacketMap in opcode_mapping.go
// 6. Add handler in server/channelserver/handlers_table.go
//
// Example:
//
// type MsgMhfYourPacket struct {
// AckHandle uint32 // Common field for request/response matching
// SomeField uint16
// }
//
// func (m *MsgMhfYourPacket) Opcode() network.PacketID {
// return network.MSG_MHF_YOUR_PACKET
// }
//
// func (m *MsgMhfYourPacket) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
// m.AckHandle = bf.ReadUint32()
// m.SomeField = bf.ReadUint16()
// return nil
// }
//
// func (m *MsgMhfYourPacket) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
// bf.WriteUint32(m.AckHandle)
// bf.WriteUint16(m.SomeField)
// return nil
// }
package mhfpacket package mhfpacket
import ( import (
@@ -6,22 +53,52 @@ import (
"erupe-ce/network/clientctx" "erupe-ce/network/clientctx"
) )
// Parser is the interface that wraps the Parse method. // Parser is the interface for deserializing packets from wire format.
//
// The Parse method reads packet data from a ByteFrame (binary stream) and
// populates the packet struct's fields. It's called when a packet arrives
// from the client.
//
// Parameters:
// - bf: ByteFrame positioned after the opcode (contains packet payload)
// - ctx: Client context (version info, capabilities) for version-specific parsing
//
// Returns an error if the packet data is malformed or incomplete.
type Parser interface { type Parser interface {
Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error
} }
// Builder is the interface that wraps the Build method. // Builder is the interface for serializing packets to wire format.
//
// The Build method writes the packet struct's fields to a ByteFrame (binary stream)
// in the format expected by the client. It's called when sending a packet to the client.
//
// Parameters:
// - bf: ByteFrame to write packet data to (opcode already written by caller)
// - ctx: Client context (version info, capabilities) for version-specific building
//
// Returns an error if serialization fails.
type Builder interface { type Builder interface {
Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error
} }
// Opcoder is the interface that wraps the Opcode method. // Opcoder is the interface for identifying a packet's opcode.
//
// The Opcode method returns the unique packet identifier used for routing
// packets to their handlers and for packet logging/debugging.
type Opcoder interface { type Opcoder interface {
Opcode() network.PacketID Opcode() network.PacketID
} }
// MHFPacket is the interface that groups the Parse, Build, and Opcode methods. // MHFPacket is the unified interface that all Monster Hunter Frontier packets implement.
//
// Every packet type must be able to:
// - Parse itself from binary data (Parser)
// - Build itself into binary data (Builder)
// - Identify its opcode (Opcoder)
//
// This interface allows the packet handling system to work generically across
// all packet types while maintaining type safety through type assertions in handlers.
type MHFPacket interface { type MHFPacket interface {
Parser Parser
Builder Builder

View File

@@ -8,14 +8,34 @@ import (
"erupe-ce/network/clientctx" "erupe-ce/network/clientctx"
) )
// MsgMhfEnumerateQuest represents the MSG_MHF_ENUMERATE_QUEST // MsgMhfEnumerateQuest is sent by the client to request a paginated list of available quests.
//
// This packet is used when:
// - Accessing the quest counter/board in town
// - Scrolling through quest lists
// - Switching between quest categories/worlds
//
// The server responds with quest metadata and binary file paths. The client then
// loads quest details from binary files on disk or via MSG_SYS_GET_FILE.
//
// Pagination:
// Quest lists can be very large (hundreds of quests). The client requests quests
// in batches using the Offset field:
// - Offset 0: First batch (quests 0-N)
// - Offset N: Next batch (quests N-2N)
// - Continues until server returns no more quests
//
// World Types:
// - 0: Newbie World (beginner quests)
// - 1: Normal World (standard quests)
// - 2+: Other world categories (events, special quests)
type MsgMhfEnumerateQuest struct { type MsgMhfEnumerateQuest struct {
AckHandle uint32 AckHandle uint32 // Response handle for matching server response to request
Unk0 uint8 // Hardcoded 0 in the binary Unk0 uint8 // Hardcoded 0 in the binary (purpose unknown)
World uint8 World uint8 // World ID/category to enumerate quests for
Counter uint16 Counter uint16 // Client counter for tracking sequential requests
Offset uint16 // Increments to request following batches of quests Offset uint16 // Pagination offset - increments by batch size for next page
Unk4 uint8 // Hardcoded 0 in the binary Unk4 uint8 // Hardcoded 0 in the binary (purpose unknown)
} }
// Opcode returns the ID associated with this packet type. // Opcode returns the ID associated with this packet type.

View File

@@ -9,11 +9,29 @@ import (
"erupe-ce/network/clientctx" "erupe-ce/network/clientctx"
) )
// MsgSysEnterStage represents the MSG_SYS_ENTER_STAGE // MsgSysEnterStage is sent by the client to enter an existing stage.
//
// This packet is used when:
// - Moving from one town area to another (e.g., Mezeporta -> Pallone)
// - Joining another player's room or quest
// - Entering a persistent stage that already exists
//
// The stage must already exist on the server. For creating new stages (quests, rooms),
// use MSG_SYS_CREATE_STAGE followed by MSG_SYS_ENTER_STAGE.
//
// Stage ID Format:
// Stage IDs are encoded strings like "sl1Ns200p0a0u0" that identify specific
// game areas:
// - sl1Ns200p0a0u0: Mezeporta (main town)
// - sl1Ns211p0a0u0: Rasta bar
// - Quest stages: Dynamic IDs created when quests start
//
// After entering, the session's stage pointer is updated and the player receives
// broadcasts from other players in that stage.
type MsgSysEnterStage struct { type MsgSysEnterStage struct {
AckHandle uint32 AckHandle uint32 // Response handle for acknowledgment
UnkBool uint8 UnkBool uint8 // Boolean flag (purpose unknown, possibly force-enter)
StageID string StageID string // ID of the stage to enter (length-prefixed string)
} }
// Opcode returns the ID associated with this packet type. // Opcode returns the ID associated with this packet type.

File diff suppressed because one or more lines are too long

View File

@@ -11,11 +11,28 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
// handleMsgSysCreateStage creates a new stage (room/quest instance).
//
// This is called when a player:
// - Posts a quest
// - Creates a private room
// - Initiates any activity requiring a new stage instance
//
// The handler:
// 1. Checks if stage already exists (return failure if it does)
// 2. Creates new stage with the requesting session as host
// 3. Sets max player count from packet
// 4. Adds stage to server's stage map
// 5. Responds with success/failure
//
// Note: This only creates the stage; the player must call MSG_SYS_ENTER_STAGE
// to actually enter it after creation.
func handleMsgSysCreateStage(s *Session, p mhfpacket.MHFPacket) { func handleMsgSysCreateStage(s *Session, p mhfpacket.MHFPacket) {
pkt := p.(*mhfpacket.MsgSysCreateStage) pkt := p.(*mhfpacket.MsgSysCreateStage)
s.server.Lock() s.server.Lock()
defer s.server.Unlock() defer s.server.Unlock()
if _, exists := s.server.stages[pkt.StageID]; exists { if _, exists := s.server.stages[pkt.StageID]; exists {
// Stage already exists, cannot create duplicate
doAckSimpleFail(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00}) doAckSimpleFail(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
} else { } else {
stage := NewStage(pkt.StageID) stage := NewStage(pkt.StageID)
@@ -28,6 +45,27 @@ func handleMsgSysCreateStage(s *Session, p mhfpacket.MHFPacket) {
func handleMsgSysStageDestruct(s *Session, p mhfpacket.MHFPacket) {} func handleMsgSysStageDestruct(s *Session, p mhfpacket.MHFPacket) {}
// doStageTransfer handles the common logic for entering/moving to a stage.
//
// This is a helper function called by handleMsgSysEnterStage and handleMsgSysMoveStage.
// It performs the full stage entry process:
//
// 1. Find or create the target stage
// 2. Add session to the stage's client map
// 3. Remove session from previous stage (if any)
// 4. Update session's stage pointers
// 5. Send cleanup command to client (clear old stage objects)
// 6. Send acknowledgment
// 7. Synchronize existing stage objects to the new player
// 8. Notify other players in the stage about new player
//
// If the stage doesn't exist, it creates it automatically (for persistent town stages).
// For quest stages, MSG_SYS_CREATE_STAGE should be called first.
//
// Parameters:
// - s: The session entering the stage
// - ackHandle: The ack handle to respond to
// - stageID: The stage ID to enter
func doStageTransfer(s *Session, ackHandle uint32, stageID string) { func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
s.server.Lock() s.server.Lock()
stage, exists := s.server.stages[stageID] stage, exists := s.server.stages[stageID]
@@ -37,7 +75,7 @@ func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
stage.Lock() stage.Lock()
stage.clients[s] = s.charID stage.clients[s] = s.charID
stage.Unlock() stage.Unlock()
} else { // Create new stage object } else { // Create new stage object (for persistent stages like towns)
s.server.Lock() s.server.Lock()
s.server.stages[stageID] = NewStage(stageID) s.server.stages[stageID] = NewStage(stageID)
stage = s.server.stages[stageID] stage = s.server.stages[stageID]
@@ -48,21 +86,21 @@ func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
stage.Unlock() stage.Unlock()
} }
// Ensure this session no longer belongs to reservations. // Ensure this session no longer belongs to their previous stage
if s.stage != nil { if s.stage != nil {
removeSessionFromStage(s) removeSessionFromStage(s)
} }
// Save our new stage ID and pointer to the new stage itself. // Save our new stage ID and pointer to the new stage itself
s.Lock() s.Lock()
s.stageID = stageID s.stageID = stageID
s.stage = s.server.stages[stageID] s.stage = s.server.stages[stageID]
s.Unlock() s.Unlock()
// Tell the client to cleanup its current stage objects. // Tell the client to cleanup its current stage objects
s.QueueSendMHF(&mhfpacket.MsgSysCleanupObject{}) s.QueueSendMHF(&mhfpacket.MsgSysCleanupObject{})
// Confirm the stage entry. // Confirm the stage entry
doAckSimpleSucceed(s, ackHandle, []byte{0x00, 0x00, 0x00, 0x00}) doAckSimpleSucceed(s, ackHandle, []byte{0x00, 0x00, 0x00, 0x00})
var temp mhfpacket.MHFPacket var temp mhfpacket.MHFPacket

View File

@@ -5,10 +5,49 @@ import (
"erupe-ce/network/mhfpacket" "erupe-ce/network/mhfpacket"
) )
// handlerFunc is the signature for all packet handler functions.
//
// Handler functions are called when a packet with a matching opcode is received.
// They process the packet and typically respond using the session's Queue methods.
//
// Parameters:
// - s: The session that received the packet (contains player state, connection)
// - p: The parsed packet (must be type-asserted to the specific packet type)
//
// Handler functions should:
// 1. Type-assert the packet to its specific type
// 2. Validate the packet data and session state
// 3. Perform the requested operation (database query, state change, etc.)
// 4. Send a response using doAckBufSucceed/Fail or s.QueueSendMHF
// 5. Handle errors gracefully (log and send error response to client)
type handlerFunc func(s *Session, p mhfpacket.MHFPacket) type handlerFunc func(s *Session, p mhfpacket.MHFPacket)
// handlerTable maps packet opcodes to their handler functions.
//
// This is the central routing table for all incoming packets. When a packet
// arrives, the session's handlePacketGroup() function:
// 1. Reads the opcode from the packet header
// 2. Looks up the handler in this table
// 3. Calls the handler with the session and parsed packet
//
// The table is initialized in init() and contains ~400+ packet handlers covering:
// - System packets (MSG_SYS_*): Connection, stages, objects, semaphores
// - MHF packets (MSG_MHF_*): Game features (quests, guilds, items, events)
// - CA packets (MSG_CA_*): Caravan system
//
// If a packet has no registered handler, it's ignored (logged in dev mode).
var handlerTable map[network.PacketID]handlerFunc var handlerTable map[network.PacketID]handlerFunc
// init registers all packet handlers in the handlerTable.
//
// Handlers are organized by feature:
// - handlers_*.go files implement related handler functions
// - This init function registers them all in the central table
//
// Adding a new handler:
// 1. Implement handleMsgYourPacket() in appropriate handlers_*.go file
// 2. Add registration here: handlerTable[network.MSG_YOUR_PACKET] = handleMsgYourPacket
// 3. Define the packet structure in network/mhfpacket/msg_*.go
func init() { func init() {
handlerTable = make(map[network.PacketID]handlerFunc) handlerTable = make(map[network.PacketID]handlerFunc)
handlerTable[network.MSG_HEAD] = handleMsgHead handlerTable[network.MSG_HEAD] = handleMsgHead

View File

@@ -1,3 +1,29 @@
// Package channelserver implements the Monster Hunter Frontier channel server.
//
// The channel server is the core gameplay component that handles actual game sessions,
// quests, player interactions, and all in-game activities. It uses a stage-based
// architecture where players move between stages (game areas/rooms) and interact
// with other players in real-time.
//
// Architecture Overview:
//
// The channel server manages three primary concepts:
// - Sessions: Individual player connections with their state and packet queues
// - Stages: Game rooms/areas where players interact (towns, quests, lobbies)
// - Semaphores: Resource locks for coordinating multiplayer activities (quests, events)
//
// Multiple channel servers can run simultaneously on different ports, allowing
// horizontal scaling and separation of different world types (Newbie, Normal, etc).
//
// Thread Safety:
//
// This package extensively uses goroutines and shared state. All shared resources
// are protected by mutexes. When modifying code, always consider thread safety:
// - Server-level: s.Lock() / s.Unlock() for session map
// - Stage-level: s.stagesLock.RLock() / s.stagesLock.Lock() for stage map
// - Session-level: session.Lock() / session.Unlock() for session state
//
// Use 'go test -race ./...' to detect race conditions during development.
package channelserver package channelserver
import ( import (
@@ -16,91 +42,120 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
// Config struct allows configuring the server. // Config holds configuration parameters for creating a new channel server.
type Config struct { type Config struct {
ID uint16 ID uint16 // Channel server ID (unique identifier)
Logger *zap.Logger Logger *zap.Logger // Logger instance for this channel server
DB *sqlx.DB DB *sqlx.DB // Database connection pool
DiscordBot *discordbot.DiscordBot DiscordBot *discordbot.DiscordBot // Optional Discord bot for chat integration
ErupeConfig *config.Config ErupeConfig *config.Config // Global Erupe configuration
Name string Name string // Display name for the server (shown in broadcasts)
Enable bool Enable bool // Whether this server is enabled
} }
// Map key type for a user binary part. // userBinaryPartID is a composite key for identifying a specific part of a user's binary data.
// User binary data is split into multiple indexed parts and stored separately.
type userBinaryPartID struct { type userBinaryPartID struct {
charID uint32 charID uint32 // Character ID who owns this binary data
index uint8 index uint8 // Part index (binary data is chunked into multiple parts)
} }
// Server is a MHF channel server. // Server represents a Monster Hunter Frontier channel server instance.
//
// The Server manages all active player sessions, game stages, and shared resources.
// It runs two main goroutines: one for accepting connections and one for managing
// the session lifecycle.
//
// Thread Safety:
// Server embeds sync.Mutex for protecting the sessions map. Use Lock()/Unlock()
// when reading or modifying s.sessions. The stages map uses a separate RWMutex
// (stagesLock) to allow concurrent reads during normal gameplay.
type Server struct { type Server struct {
sync.Mutex sync.Mutex // Protects sessions map and isShuttingDown flag
Channels []*Server
ID uint16
GlobalID string
IP string
Port uint16
logger *zap.Logger
db *sqlx.DB
erupeConfig *config.Config
acceptConns chan net.Conn
deleteConns chan net.Conn
sessions map[net.Conn]*Session
listener net.Listener // Listener that is created when Server.Start is called.
isShuttingDown bool
stagesLock sync.RWMutex // Server identity and network configuration
stages map[string]*Stage Channels []*Server // Reference to all channel servers (for world broadcasts)
ID uint16 // This server's ID
GlobalID string // Global identifier string
IP string // Server IP address
Port uint16 // Server listening port
// Used to map different languages // Core dependencies
dict map[string]string logger *zap.Logger // Logger instance
db *sqlx.DB // Database connection pool
erupeConfig *config.Config // Global configuration
// UserBinary // Connection management
userBinaryPartsLock sync.RWMutex acceptConns chan net.Conn // Channel for new accepted connections
userBinaryParts map[userBinaryPartID][]byte deleteConns chan net.Conn // Channel for connections to be cleaned up
sessions map[net.Conn]*Session // Active sessions keyed by connection
listener net.Listener // TCP listener (created when Server.Start is called)
isShuttingDown bool // Shutdown flag to stop goroutines gracefully
// Semaphore // Stage (game room) management
semaphoreLock sync.RWMutex stagesLock sync.RWMutex // Protects stages map (RWMutex for concurrent reads)
semaphore map[string]*Semaphore stages map[string]*Stage // Active stages keyed by stage ID string
semaphoreIndex uint32
// Discord chat integration // Localization
discordBot *discordbot.DiscordBot dict map[string]string // Language string mappings for server messages
name string // User binary data storage
// Binary data is player-specific custom data that the client stores on the server
userBinaryPartsLock sync.RWMutex // Protects userBinaryParts map
userBinaryParts map[userBinaryPartID][]byte // Chunked binary data by character
raviente *Raviente // Semaphore (multiplayer coordination) management
semaphoreLock sync.RWMutex // Protects semaphore map and semaphoreIndex
semaphore map[string]*Semaphore // Active semaphores keyed by semaphore ID
semaphoreIndex uint32 // Auto-incrementing ID for new semaphores (starts at 7)
// Optional integrations
discordBot *discordbot.DiscordBot // Discord bot for chat relay (nil if disabled)
name string // Server display name (used in chat messages)
// Special event system: Raviente (large-scale multiplayer raid)
raviente *Raviente // Raviente event state and coordination
} }
// Raviente manages the Raviente raid event, a large-scale multiplayer encounter.
//
// Raviente is a special monster that requires coordination between many players
// across multiple phases. This struct tracks registration, event state, and
// support/assistance data for the active Raviente encounter.
type Raviente struct { type Raviente struct {
sync.Mutex sync.Mutex // Protects all Raviente data during concurrent access
register *RavienteRegister register *RavienteRegister // Player registration and event timing
state *RavienteState state *RavienteState // Current state of the Raviente encounter
support *RavienteSupport support *RavienteSupport // Support/assistance tracking data
} }
// RavienteRegister tracks player registration and timing for Raviente events.
type RavienteRegister struct { type RavienteRegister struct {
nextTime uint32 nextTime uint32 // Timestamp for next Raviente event
startTime uint32 startTime uint32 // Event start timestamp
postTime uint32 postTime uint32 // Event post-completion timestamp
killedTime uint32 killedTime uint32 // Timestamp when Raviente was defeated
ravienteType uint32 ravienteType uint32 // Raviente variant (2=Berserk, 3=Extreme, 4=Extreme Limited, 5=Berserk Small)
maxPlayers uint32 maxPlayers uint32 // Maximum players allowed (determines scaling)
carveQuest uint32 carveQuest uint32 // Quest ID for carving phase after defeat
register []uint32 register []uint32 // List of registered player IDs (up to 5 slots)
} }
// RavienteState holds the dynamic state data for an active Raviente encounter.
// The state array contains 29 uint32 values tracking encounter progress.
type RavienteState struct { type RavienteState struct {
stateData []uint32 stateData []uint32 // Raviente encounter state (29 values)
} }
// RavienteSupport tracks support and assistance data for Raviente encounters.
// The support array contains 25 uint32 values for coordination features.
type RavienteSupport struct { type RavienteSupport struct {
supportData []uint32 supportData []uint32 // Support/assistance data (25 values)
} }
// Set up the Raviente variables for the server // NewRaviente creates and initializes a new Raviente event manager with default values.
// All state and support arrays are initialized to zero, ready for a new event.
func NewRaviente() *Raviente { func NewRaviente() *Raviente {
ravienteRegister := &RavienteRegister{ ravienteRegister := &RavienteRegister{
nextTime: 0, nextTime: 0,
@@ -125,6 +180,15 @@ func NewRaviente() *Raviente {
return raviente return raviente
} }
// GetRaviMultiplier calculates the difficulty multiplier for Raviente based on player count.
//
// Raviente scales its difficulty based on the number of active participants. If there
// are fewer players than the minimum threshold, the encounter becomes easier by returning
// a multiplier < 1. Returns 1.0 for full groups, or 0 if the semaphore doesn't exist.
//
// Minimum player thresholds:
// - Large Raviente (maxPlayers > 8): 24 players minimum
// - Small Raviente (maxPlayers <= 8): 4 players minimum
func (r *Raviente) GetRaviMultiplier(s *Server) float64 { func (r *Raviente) GetRaviMultiplier(s *Server) float64 {
raviSema := getRaviSemaphore(s) raviSema := getRaviSemaphore(s)
if raviSema != nil { if raviSema != nil {
@@ -142,7 +206,19 @@ func (r *Raviente) GetRaviMultiplier(s *Server) float64 {
return 0 return 0
} }
// NewServer creates a new Server type. // NewServer creates and initializes a new channel server with the given configuration.
//
// The server is initialized with default persistent stages (town areas that always exist):
// - sl1Ns200p0a0u0: Mezeporta (main town)
// - sl1Ns211p0a0u0: Rasta bar
// - sl1Ns260p0a0u0: Pallone Caravan
// - sl1Ns262p0a0u0: Pallone Guest House 1st Floor
// - sl1Ns263p0a0u0: Pallone Guest House 2nd Floor
// - sl2Ns379p0a0u0: Diva fountain / prayer fountain
// - sl1Ns462p0a0u0: MezFes (festival area)
//
// Additional dynamic stages are created by players when they create quests or rooms.
// The semaphore index starts at 7 to avoid reserved IDs 0-6.
func NewServer(config *Config) *Server { func NewServer(config *Config) *Server {
s := &Server{ s := &Server{
ID: config.ID, ID: config.ID,
@@ -187,7 +263,16 @@ func NewServer(config *Config) *Server {
return s return s
} }
// Start starts the server in a new goroutine. // Start begins listening for connections and starts the server's main goroutines.
//
// This method:
// 1. Creates a TCP listener on the configured port
// 2. Launches acceptClients() goroutine to accept new connections
// 3. Launches manageSessions() goroutine to handle session lifecycle
// 4. Optionally starts Discord chat integration
//
// Returns an error if the listener cannot be created (e.g., port in use).
// The server runs asynchronously after Start() returns successfully.
func (s *Server) Start() error { func (s *Server) Start() error {
l, err := net.Listen("tcp", fmt.Sprintf(":%d", s.Port)) l, err := net.Listen("tcp", fmt.Sprintf(":%d", s.Port))
if err != nil { if err != nil {
@@ -206,7 +291,15 @@ func (s *Server) Start() error {
return nil return nil
} }
// Shutdown tries to shut down the server gracefully. // Shutdown gracefully stops the server and all its goroutines.
//
// This method:
// 1. Sets the shutdown flag to stop accepting new connections
// 2. Closes the TCP listener (causes acceptClients to exit)
// 3. Closes the acceptConns channel (signals manageSessions to exit)
//
// Existing sessions are not forcibly disconnected but will eventually timeout
// or disconnect naturally. For a complete shutdown, wait for all sessions to close.
func (s *Server) Shutdown() { func (s *Server) Shutdown() {
s.Lock() s.Lock()
s.isShuttingDown = true s.isShuttingDown = true
@@ -267,7 +360,17 @@ func (s *Server) manageSessions() {
} }
} }
// BroadcastMHF queues a MHFPacket to be sent to all sessions. // BroadcastMHF sends a packet to all active sessions on this channel server.
//
// The packet is built individually for each session to handle per-session state
// (like client version differences). Packets are queued in a non-blocking manner,
// so if a session's queue is full, the packet is dropped for that session only.
//
// Parameters:
// - pkt: The MHFPacket to broadcast to all sessions
// - ignoredSession: Optional session to exclude from the broadcast (typically the sender)
//
// Thread Safety: This method locks the server's session map during iteration.
func (s *Server) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) { func (s *Server) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) {
// Broadcast the data. // Broadcast the data.
s.Lock() s.Lock()
@@ -289,6 +392,16 @@ func (s *Server) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session)
} }
} }
// WorldcastMHF broadcasts a packet to all channel servers (world-wide broadcast).
//
// This is used for server-wide announcements like Raviente events that should be
// visible to all players across all channels. The packet is sent to every channel
// server except the one specified in ignoredChannel.
//
// Parameters:
// - pkt: The MHFPacket to broadcast across all channels
// - ignoredSession: Optional session to exclude from broadcasts
// - ignoredChannel: Optional channel server to skip (typically the originating channel)
func (s *Server) WorldcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session, ignoredChannel *Server) { func (s *Server) WorldcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session, ignoredChannel *Server) {
for _, c := range s.Channels { for _, c := range s.Channels {
if c == ignoredChannel { if c == ignoredChannel {
@@ -298,7 +411,13 @@ func (s *Server) WorldcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session,
} }
} }
// BroadcastChatMessage broadcasts a simple chat message to all the sessions. // BroadcastChatMessage sends a simple chat message to all sessions on this server.
//
// The message appears as a system message with the server's configured name as the sender.
// This is typically used for server announcements, maintenance notifications, or events.
//
// Parameters:
// - message: The text message to broadcast to all players
func (s *Server) BroadcastChatMessage(message string) { func (s *Server) BroadcastChatMessage(message string) {
bf := byteframe.NewByteFrame() bf := byteframe.NewByteFrame()
bf.SetLE() bf.SetLE()

View File

@@ -7,27 +7,62 @@ import (
"sync" "sync"
) )
// Stage holds stage-specific information // Semaphore is a multiplayer coordination mechanism for quests and events.
//
// Despite the name, Semaphore is NOT a low-level synchronization primitive (like semaphore.Weighted from golang.org/x/sync).
// Instead, it's a game-specific resource lock that coordinates multiplayer activities where:
// - Players must acquire a semaphore before participating
// - A limited number of participants are allowed (maxPlayers)
// - The semaphore tracks both active and reserved participants
//
// Use Cases:
// - Quest coordination: Ensures quest party size limits are enforced
// - Event coordination: Raviente, VS Tournament, Diva Defense
// - Global resources: Prevents multiple groups from starting conflicting events
//
// Semaphore vs Stage:
// - Stages are spatial (game rooms, areas). Players in a stage can see each other.
// - Semaphores are logical (coordination locks). Players in a semaphore are
// participating in the same activity but may be in different stages.
//
// Example: Raviente Event
// - Players acquire the Raviente semaphore to register for the event
// - Multiple quest stages exist (preparation, phase 1, phase 2, carving)
// - All participants share the same semaphore across different stages
// - The semaphore enforces the 32-player limit across all stages
//
// Thread Safety:
// Semaphore embeds sync.RWMutex. Use RLock for reads and Lock for writes.
type Semaphore struct { type Semaphore struct {
sync.RWMutex sync.RWMutex // Protects semaphore state during concurrent access
// Stage ID string // Semaphore identity
id_semaphore string id_semaphore string // Semaphore ID string (identifies the resource/activity)
id uint32 // Numeric ID for client communication (auto-generated, starts at 7)
id uint32 // Active participants
clients map[*Session]uint32 // Sessions actively using this semaphore -> character ID
// Map of session -> charID. // Reserved slots
// These are clients that are CURRENTLY in the stage // Players who have acquired the semaphore but may not be actively in the stage yet.
clients map[*Session]uint32 // The value is always nil; only the key (charID) matters. This is a set implementation.
reservedClientSlots map[uint32]interface{} // Character ID -> nil (set of reserved IDs)
// Map of charID -> interface{}, only the key is used, value is always nil. // Capacity
reservedClientSlots map[uint32]interface{} maxPlayers uint16 // Maximum concurrent participants (e.g., 4 for quests, 32 for Raviente)
// Max Players for Semaphore
maxPlayers uint16
} }
// NewStage creates a new stage with initialized values. // NewSemaphore creates and initializes a new Semaphore for coordinating an activity.
//
// The semaphore is assigned an auto-incrementing ID from the server's semaphoreIndex.
// IDs 0-6 are reserved, so the first semaphore gets ID 7.
//
// Parameters:
// - s: The server (used to generate unique semaphore ID)
// - ID: Semaphore ID string (identifies the activity/resource)
// - MaxPlayers: Maximum participants allowed
//
// Returns a new Semaphore ready for client acquisition.
func NewSemaphore(s *Server, ID string, MaxPlayers uint16) *Semaphore { func NewSemaphore(s *Server, ID string, MaxPlayers uint16) *Semaphore {
sema := &Semaphore{ sema := &Semaphore{
id_semaphore: ID, id_semaphore: ID,
@@ -55,7 +90,22 @@ func (s *Semaphore) BroadcastRavi(pkt mhfpacket.MHFPacket) {
} }
} }
// BroadcastMHF queues a MHFPacket to be sent to all sessions in the stage. // BroadcastMHF sends a packet to all active participants in the semaphore.
//
// This is used for event-wide announcements that all participants need to see,
// regardless of which stage they're currently in. Examples:
// - Raviente phase changes
// - Tournament updates
// - Event completion notifications
//
// Only active clients (in the clients map) receive broadcasts. Reserved clients
// who haven't fully joined yet are excluded.
//
// Parameters:
// - pkt: The MHFPacket to broadcast to all participants
// - ignoredSession: Optional session to exclude from broadcast
//
// Thread Safety: Caller should hold semaphore lock when iterating clients.
func (s *Semaphore) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) { func (s *Semaphore) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) {
// Broadcast the data. // Broadcast the data.
for session := range s.clients { for session := range s.clients {

View File

@@ -18,54 +18,85 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
// packet is an internal wrapper for queued outbound packets.
type packet struct { type packet struct {
data []byte data []byte // Raw packet bytes to send
nonBlocking bool nonBlocking bool // If true, drop packet if queue is full instead of blocking
} }
// Session holds state for the channel server connection. // Session represents an active player connection to the channel server.
//
// Each Session manages a single player's connection lifecycle, including:
// - Packet send/receive loops running in separate goroutines
// - Current stage (game area) and stage movement history
// - Character state (ID, courses, guild, etc.)
// - Mail system state
// - Quest/semaphore participation
//
// Lifecycle:
// 1. Created by NewSession() when a player connects
// 2. Started with Start() which launches send/recv goroutines
// 3. Processes packets through handlePacketGroup() -> handler functions
// 4. Cleaned up when connection closes or times out (30 second inactivity)
//
// Thread Safety:
// Session embeds sync.Mutex to protect mutable state. Most handler functions
// acquire the session lock when modifying session fields. The packet queue
// (sendPackets channel) is safe for concurrent access.
type Session struct { type Session struct {
sync.Mutex sync.Mutex // Protects session state during concurrent handler execution
logger *zap.Logger
server *Server
rawConn net.Conn
cryptConn *network.CryptConn
sendPackets chan packet
clientContext *clientctx.ClientContext
lastPacket time.Time
userEnteredStage bool // If the user has entered a stage before // Core connection and logging
stageID string logger *zap.Logger // Logger with connection address
stage *Stage server *Server // Parent server reference
reservationStage *Stage // Required for the stateful MsgSysUnreserveStage packet. rawConn net.Conn // Underlying TCP connection
stagePass string // Temporary storage cryptConn *network.CryptConn // Encrypted connection wrapper
prevGuildID uint32 // Stores the last GuildID used in InfoGuild sendPackets chan packet // Outbound packet queue (buffered, size 20)
charID uint32 clientContext *clientctx.ClientContext // Client version and capabilities
logKey []byte lastPacket time.Time // Timestamp of last received packet (for timeout detection)
sessionStart int64
courses []mhfcourse.Course
token string
kqf []byte
kqfOverride bool
semaphore *Semaphore // Required for the stateful MsgSysUnreserveStage packet. // Stage (game area) state
userEnteredStage bool // Whether player has entered any stage during this session
stageID string // Current stage ID string (e.g., "sl1Ns200p0a0u0")
stage *Stage // Pointer to current stage object
reservationStage *Stage // Stage reserved for quest (used by unreserve packet)
stagePass string // Temporary password storage for password-protected stages
stageMoveStack *stringstack.StringStack // Navigation history for "back" functionality
// A stack containing the stage movement history (push on enter/move, pop on back) // Player identity and state
stageMoveStack *stringstack.StringStack charID uint32 // Character ID for this session
Name string // Character name (for debugging/logging)
prevGuildID uint32 // Last guild ID queried (cached for InfoGuild)
token string // Authentication token from sign server
logKey []byte // Logging encryption key
sessionStart int64 // Session start timestamp (Unix time)
courses []mhfcourse.Course // Active Monster Hunter courses (buffs/subscriptions)
kqf []byte // Key Quest Flags (quest progress tracking)
kqfOverride bool // Whether KQF is being overridden
// Accumulated index used for identifying mail for a client // Quest/event coordination
// I'm not certain why this is used, but since the client is sending it semaphore *Semaphore // Semaphore for quest/event participation (if in a coordinated activity)
// I want to rely on it for now as it might be important later.
mailAccIndex uint8
// Contains the mail list that maps accumulated indexes to mail IDs
mailList []int
// For Debuging // Mail system state
Name string // The mail system uses an accumulated index system where the client tracks
closed bool // mail by incrementing indices rather than direct mail IDs
mailAccIndex uint8 // Current accumulated mail index for this session
mailList []int // Maps accumulated indices to actual mail IDs
// Connection state
closed bool // Whether connection has been closed (prevents double-cleanup)
} }
// NewSession creates a new Session type. // NewSession creates and initializes a new Session for an incoming connection.
//
// The session is created with:
// - A logger tagged with the connection's remote address
// - An encrypted connection wrapper
// - A buffered packet send queue (size 20)
// - Initialized stage movement stack for navigation
// - Session start time set to current time
//
// After creation, call Start() to begin processing packets.
func NewSession(server *Server, conn net.Conn) *Session { func NewSession(server *Server, conn net.Conn) *Session {
s := &Session{ s := &Session{
logger: server.logger.Named(conn.RemoteAddr().String()), logger: server.logger.Named(conn.RemoteAddr().String()),
@@ -81,7 +112,17 @@ func NewSession(server *Server, conn net.Conn) *Session {
return s return s
} }
// Start starts the session packet send and recv loop(s). // Start begins the session's packet processing by launching send and receive goroutines.
//
// This method spawns two long-running goroutines:
// 1. sendLoop(): Continuously sends queued packets to the client
// 2. recvLoop(): Continuously receives and processes packets from the client
//
// The receive loop handles packet parsing, routing to handlers, and recursive
// packet group processing (when multiple packets arrive in one read).
//
// Both loops run until the connection closes or times out. Unlike the sign and
// entrance servers, the channel server does NOT expect an 8-byte NULL initialization.
func (s *Session) Start() { func (s *Session) Start() {
go func() { go func() {
s.logger.Debug("New connection", zap.String("RemoteAddr", s.rawConn.RemoteAddr().String())) s.logger.Debug("New connection", zap.String("RemoteAddr", s.rawConn.RemoteAddr().String()))
@@ -92,7 +133,19 @@ func (s *Session) Start() {
}() }()
} }
// QueueSend queues a packet (raw []byte) to be sent. // QueueSend queues a packet for transmission to the client (blocking).
//
// This method:
// 1. Logs the outbound packet (if dev mode is enabled)
// 2. Attempts to enqueue the packet to the send channel
// 3. If the queue is full, flushes non-blocking packets and retries
//
// Blocking vs Non-blocking:
// This is a blocking send - if the queue fills, it will flush non-blocking
// packets (broadcasts, non-critical messages) to make room for this packet.
// Use QueueSendNonBlocking() for packets that can be safely dropped.
//
// Thread Safety: Safe for concurrent calls from multiple goroutines.
func (s *Session) QueueSend(data []byte) { func (s *Session) QueueSend(data []byte) {
s.logMessage(binary.BigEndian.Uint16(data[0:2]), data, "Server", s.Name) s.logMessage(binary.BigEndian.Uint16(data[0:2]), data, "Server", s.Name)
select { select {
@@ -114,7 +167,18 @@ func (s *Session) QueueSend(data []byte) {
} }
} }
// QueueSendNonBlocking queues a packet (raw []byte) to be sent, dropping the packet entirely if the queue is full. // QueueSendNonBlocking queues a packet for transmission (non-blocking, lossy).
//
// Unlike QueueSend(), this method drops the packet immediately if the send queue
// is full. This is used for broadcast messages, stage updates, and other packets
// where occasional packet loss is acceptable (client will re-sync or request again).
//
// Use cases:
// - Stage broadcasts (player movement, chat)
// - Server-wide announcements
// - Non-critical status updates
//
// Thread Safety: Safe for concurrent calls from multiple goroutines.
func (s *Session) QueueSendNonBlocking(data []byte) { func (s *Session) QueueSendNonBlocking(data []byte) {
select { select {
case s.sendPackets <- packet{data, true}: case s.sendPackets <- packet{data, true}:
@@ -124,7 +188,15 @@ func (s *Session) QueueSendNonBlocking(data []byte) {
} }
} }
// QueueSendMHF queues a MHFPacket to be sent. // QueueSendMHF queues a structured MHFPacket for transmission to the client.
//
// This is a convenience method that:
// 1. Creates a byteframe and writes the packet opcode
// 2. Calls the packet's Build() method to serialize its data
// 3. Queues the resulting bytes using QueueSend()
//
// The packet is built with the session's clientContext, allowing version-specific
// packet formatting when needed.
func (s *Session) QueueSendMHF(pkt mhfpacket.MHFPacket) { func (s *Session) QueueSendMHF(pkt mhfpacket.MHFPacket) {
// Make the header // Make the header
bf := byteframe.NewByteFrame() bf := byteframe.NewByteFrame()
@@ -137,7 +209,15 @@ func (s *Session) QueueSendMHF(pkt mhfpacket.MHFPacket) {
s.QueueSend(bf.Data()) s.QueueSend(bf.Data())
} }
// QueueAck is a helper function to queue an MSG_SYS_ACK with the given ack handle and data. // QueueAck sends an acknowledgment packet with optional response data.
//
// Many client packets include an "ack handle" field - a unique identifier the client
// uses to match responses to requests. This method constructs and queues a MSG_SYS_ACK
// packet containing the ack handle and response data.
//
// Parameters:
// - ackHandle: The ack handle from the original client packet
// - data: Response payload bytes (can be empty for simple acks)
func (s *Session) QueueAck(ackHandle uint32, data []byte) { func (s *Session) QueueAck(ackHandle uint32, data []byte) {
bf := byteframe.NewByteFrame() bf := byteframe.NewByteFrame()
bf.WriteUint16(uint16(network.MSG_SYS_ACK)) bf.WriteUint16(uint16(network.MSG_SYS_ACK))

View File

@@ -7,49 +7,94 @@ import (
"erupe-ce/network/mhfpacket" "erupe-ce/network/mhfpacket"
) )
// Object holds infomation about a specific object. // Object represents a placeable object in a stage (e.g., ballista, bombs, traps).
//
// Objects are spawned by players during quests and can be interacted with by
// other players in the same stage. Each object has an owner, position, and
// unique ID for client-server synchronization.
type Object struct { type Object struct {
sync.RWMutex sync.RWMutex // Protects object state during updates
id uint32 id uint32 // Unique object ID (see NextObjectID for ID generation)
ownerCharID uint32 ownerCharID uint32 // Character ID of the player who placed this object
x, y, z float32 x, y, z float32 // 3D position coordinates
} }
// stageBinaryKey is a struct used as a map key for identifying a stage binary part. // stageBinaryKey is a composite key for identifying a specific piece of stage binary data.
//
// Stage binary data is custom game state that the stage host (quest leader) sets
// and the server echoes to other clients. It's used for quest state, monster HP,
// environmental conditions, etc. The data is keyed by two ID bytes.
type stageBinaryKey struct { type stageBinaryKey struct {
id0 uint8 id0 uint8 // First binary data identifier
id1 uint8 id1 uint8 // Second binary data identifier
} }
// Stage holds stage-specific information // Stage represents a game room/area where players interact.
//
// Stages are the core spatial concept in Monster Hunter Frontier. They represent:
// - Town areas (Mezeporta, Pallone, etc.) - persistent, always exist
// - Quest instances - created dynamically when a player starts a quest
// - Private rooms - password-protected player gathering areas
//
// Stage Lifecycle:
// 1. Created via NewStage() or MSG_SYS_CREATE_STAGE packet
// 2. Players enter via MSG_SYS_ENTER_STAGE or MSG_SYS_MOVE_STAGE
// 3. Stage host manages state via binary data packets
// 4. Destroyed via MSG_SYS_STAGE_DESTRUCT when empty or quest completes
//
// Client Participation:
// There are two types of client participation:
// - Active clients (in clients map): Currently in the stage, receive broadcasts
// - Reserved slots (in reservedClientSlots): Quest participants who haven't
// entered yet (e.g., loading screen, preparing). They hold a slot but don't
// receive stage broadcasts until they fully enter.
//
// Thread Safety:
// Stage embeds sync.RWMutex. Use RLock for reads (broadcasts, queries) and
// Lock for writes (entering, leaving, state changes).
type Stage struct { type Stage struct {
sync.RWMutex sync.RWMutex // Protects all stage state during concurrent access
// Stage ID string // Stage identity
id string id string // Stage ID string (e.g., "sl1Ns200p0a0u0" for Mezeporta)
// Objects // Objects in the stage (ballistas, bombs, traps, etc.)
objects map[uint32]*Object objects map[uint32]*Object // Active objects keyed by object ID
objectIndex uint8 objectIndex uint8 // Auto-incrementing index for object ID generation
// Map of session -> charID. // Active participants
// These are clients that are CURRENTLY in the stage clients map[*Session]uint32 // Sessions currently in stage -> their character ID
clients map[*Session]uint32
// Map of charID -> bool, key represents whether they are ready // Reserved slots for quest participants
// These are clients that aren't in the stage, but have reserved a slot (for quests, etc). // Map of charID -> ready status. These players have reserved a slot but
reservedClientSlots map[uint32]bool // haven't fully entered yet (e.g., still loading, in preparation screen)
reservedClientSlots map[uint32]bool // Character ID -> ready flag
// These are raw binary blobs that the stage owner sets, // Stage binary data
// other clients expect the server to echo them back in the exact same format. // Raw binary blobs set by the stage host (quest leader) that track quest state.
rawBinaryData map[stageBinaryKey][]byte // The server stores and echoes this data to clients verbatim. Used for:
// - Monster HP and status
// - Environmental state (time remaining, weather)
// - Quest objectives and progress
rawBinaryData map[stageBinaryKey][]byte // Binary state keyed by (id0, id1)
host *Session // Stage settings
maxPlayers uint16 host *Session // Stage host (quest leader, room creator)
password string maxPlayers uint16 // Maximum players allowed (default 4)
password string // Password for private stages (empty if public)
} }
// NewStage creates a new stage with intialized values. // NewStage creates and initializes a new Stage with the given ID.
//
// The stage is created with:
// - Empty client and reserved slot maps
// - Empty object map with objectIndex starting at 0
// - Empty binary data map
// - Default max players set to 4 (standard quest party size)
// - No password (public stage)
//
// For persistent town stages, this is called during server initialization.
// For dynamic quest stages, this is called when a player creates a quest.
func NewStage(ID string) *Stage { func NewStage(ID string) *Stage {
s := &Stage{ s := &Stage{
id: ID, id: ID,
@@ -63,7 +108,24 @@ func NewStage(ID string) *Stage {
return s return s
} }
// BroadcastMHF queues a MHFPacket to be sent to all sessions in the stage. // BroadcastMHF sends a packet to all players currently in the stage.
//
// This method is used for stage-local events like:
// - Player chat messages within the stage
// - Monster state updates
// - Object placement/removal notifications
// - Quest events visible only to stage participants
//
// The packet is built individually for each client to support version-specific
// formatting. Packets are sent non-blocking (dropped if queue full).
//
// Reserved clients (those who haven't fully entered) do NOT receive broadcasts.
//
// Parameters:
// - pkt: The MHFPacket to broadcast to stage participants
// - ignoredSession: Optional session to exclude (typically the sender)
//
// Thread Safety: This method holds the stage lock during iteration.
func (s *Stage) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) { func (s *Stage) BroadcastMHF(pkt mhfpacket.MHFPacket, ignoredSession *Session) {
s.Lock() s.Lock()
defer s.Unlock() defer s.Unlock()
@@ -96,6 +158,18 @@ func (s *Stage) isQuest() bool {
return len(s.reservedClientSlots) > 0 return len(s.reservedClientSlots) > 0
} }
// NextObjectID generates the next available object ID for this stage.
//
// Object IDs have special constraints due to client limitations:
// - Index 0 does not update position correctly (avoided)
// - Index 127 does not update position correctly (avoided)
// - Indices > 127 do not replicate correctly across clients (avoided)
//
// The ID is generated by packing bytes into a uint32 in a specific format
// expected by the client. The objectIndex cycles from 1-126 to stay within
// valid bounds.
//
// Thread Safety: Caller must hold stage lock when calling this method.
func (s *Stage) NextObjectID() uint32 { func (s *Stage) NextObjectID() uint32 {
s.objectIndex = s.objectIndex + 1 s.objectIndex = s.objectIndex + 1
// Objects beyond 127 do not duplicate correctly // Objects beyond 127 do not duplicate correctly

View File

@@ -177,7 +177,7 @@ func (s *Server) getGuildmatesForCharacters(chars []character) []members {
if err != nil { if err != nil {
continue continue
} }
for i, _ := range charGuildmates { for i := range charGuildmates {
charGuildmates[i].CID = char.ID charGuildmates[i].CID = char.ID
} }
guildmates = append(guildmates, charGuildmates...) guildmates = append(guildmates, charGuildmates...)