mirror of
https://github.com/Mezeporta/Erupe.git
synced 2026-03-23 08:03:51 +01:00
feat(shutdown): graceful drain + configurable countdown
Add ShutdownAndDrain to the channel server (issue #179 non-breaking subset): on SIGTERM/SIGINT, force-close all active sessions so that logoutPlayer runs for each one (saves character data, cleans up stages and semaphores), then poll until the session map empties or a 30-second context deadline passes. Existing Shutdown() is unchanged. Add ShutdownCountdownSeconds int config field (default 10) alongside DisableSoftCrash so operators can tune the broadcast countdown without patching code. A zero value falls back to 10 for safety. Fix pre-existing test failures: MsgMhfAddRewardSongCount has a complete Parse() implementation so it no longer belongs in the "NOT IMPLEMENTED" parse test list; its handler test is updated to pass a real packet and assert an ACK response instead of calling with nil.
This commit is contained in:
@@ -87,13 +87,17 @@ func TestHandleMsgMhfAddRewardSongCount(t *testing.T) {
|
||||
server := createMockServer()
session := createMockSession(1, server)

// The handler must not panic when given a well-formed packet.
defer func() {
	if r := recover(); r != nil {
		t.Errorf("handleMsgMhfAddRewardSongCount panicked: %v", r)
	}
}()

// MsgMhfAddRewardSongCount has a complete Parse() implementation, so the
// handler is exercised with a concrete packet and is expected to queue an
// ACK response. Note: it is called exactly once, and only with a real
// packet — a nil packet would dereference through a nil pointer, and a
// second call would enqueue an extra packet, making the single-receive
// select below ambiguous.
pkt := &mhfpacket.MsgMhfAddRewardSongCount{AckHandle: 42}
handleMsgMhfAddRewardSongCount(session, pkt)

// Non-blocking receive: exactly one ACK should already be queued.
select {
case p := <-session.sendPackets:
	if len(p.data) == 0 {
		t.Error("Response packet should have data")
	}
default:
	t.Error("No response packet queued")
}
}
|
||||
|
||||
func TestHandleMsgMhfAcquireMonthlyReward(t *testing.T) {
|
||||
@@ -202,7 +206,6 @@ func TestEmptyHandlers_MiscFiles_Reward(t *testing.T) {
|
||||
name string
|
||||
fn func()
|
||||
}{
|
||||
{"handleMsgMhfAddRewardSongCount", func() { handleMsgMhfAddRewardSongCount(session, nil) }},
|
||||
{"handleMsgMhfAcceptReadReward", func() { handleMsgMhfAcceptReadReward(session, nil) }},
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package channelserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -246,6 +247,51 @@ func (s *Server) Shutdown() {
|
||||
|
||||
}
|
||||
|
||||
// ShutdownAndDrain stops accepting new connections, force-closes every active
|
||||
// session so that their logoutPlayer cleanup runs (saves character data, removes
|
||||
// from stages, etc.), then waits until all sessions have been removed from the
|
||||
// sessions map or ctx is cancelled. It is safe to call multiple times.
|
||||
func (s *Server) ShutdownAndDrain(ctx context.Context) {
|
||||
s.Shutdown()
|
||||
|
||||
// Snapshot all active connections while holding the lock, then close them
|
||||
// outside the lock so we don't hold it during I/O. Closing a connection
|
||||
// causes the session's recvLoop to see io.EOF and call logoutPlayer(), which
|
||||
// in turn deletes the entry from s.sessions under the server mutex.
|
||||
s.Lock()
|
||||
conns := make([]net.Conn, 0, len(s.sessions))
|
||||
for conn := range s.sessions {
|
||||
conns = append(conns, conn)
|
||||
}
|
||||
s.Unlock()
|
||||
|
||||
for _, conn := range conns {
|
||||
_ = conn.Close()
|
||||
}
|
||||
|
||||
// Poll until logoutPlayer has removed every session or the deadline passes.
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
s.Lock()
|
||||
remaining := len(s.sessions)
|
||||
s.Unlock()
|
||||
s.logger.Warn("Shutdown drain timed out", zap.Int("remaining_sessions", remaining))
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.Lock()
|
||||
n := len(s.sessions)
|
||||
s.Unlock()
|
||||
if n == 0 {
|
||||
s.logger.Info("Shutdown drain complete")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) acceptClients() {
|
||||
for {
|
||||
conn, err := s.listener.Accept()
|
||||
|
||||
Reference in New Issue
Block a user