fix: replace fmt.Sprintf in logger calls with structured fields and add LoopDelay default

fmt.Sprintf inside zap logger calls defeats structured logging,
making log aggregation and filtering harder. All 6 sites now use
proper zap fields (zap.Uint32, zap.Uint8, zap.String).

LoopDelay had no viper.SetDefault, so omitting it from config.json
left it at the zero value (0 ms), turning the recv loop into a
busy-loop. The default is now 50 ms, matching config.example.json.
This commit is contained in:
Houmgaor
2026-02-22 16:32:43 +01:00
parent b3f75232a3
commit 1d507b3d11
6 changed files with 172 additions and 6 deletions

View File

@@ -180,7 +180,7 @@ func handleMsgMhfLoaddata(s *Session, p mhfpacket.MHFPacket) {
data, err := s.server.charRepo.LoadColumn(s.charID, "savedata")
if err != nil || len(data) == 0 {
s.logger.Warn(fmt.Sprintf("Failed to load savedata (CID: %d)", s.charID), zap.Error(err))
s.logger.Warn("Failed to load savedata", zap.Uint32("charID", s.charID), zap.Error(err))
_ = s.rawConn.Close() // Terminate the connection
return
}

View File

@@ -31,7 +31,7 @@ func handleMsgMhfOperateGuild(s *Session, p mhfpacket.MHFPacket) {
case mhfpacket.OperateGuildDisband:
response := 1
if guild.LeaderCharID != s.charID {
s.logger.Warn(fmt.Sprintf("character '%d' is attempting to manage guild '%d' without permission", s.charID, guild.ID))
s.logger.Warn("Unauthorized guild management attempt", zap.Uint32("charID", s.charID), zap.Uint32("guildID", guild.ID))
response = 0
} else {
err = s.server.guildRepo.Disband(guild.ID)
@@ -309,7 +309,7 @@ func handleMsgMhfOperateGuildMember(s *Session, p mhfpacket.MHFPacket) {
}
default:
doAckSimpleFail(s, pkt.AckHandle, make([]byte, 4))
s.logger.Warn(fmt.Sprintf("unhandled operateGuildMember action '%d'", pkt.Action))
s.logger.Warn("Unhandled operateGuildMember action", zap.Uint8("action", pkt.Action))
}
if err != nil {

View File

@@ -109,7 +109,7 @@ func handleMsgSysGetFile(s *Session, p mhfpacket.MHFPacket) {
// Read the scenario file.
data, err := os.ReadFile(filepath.Join(s.server.erupeConfig.BinPath, fmt.Sprintf("scenarios/%s.bin", filename)))
if err != nil {
s.logger.Error(fmt.Sprintf("Failed to open file: %s/scenarios/%s.bin", s.server.erupeConfig.BinPath, filename))
s.logger.Error("Failed to open scenario file", zap.String("binPath", s.server.erupeConfig.BinPath), zap.String("filename", filename))
doAckBufFail(s, pkt.AckHandle, nil)
return
}
@@ -128,7 +128,7 @@ func handleMsgSysGetFile(s *Session, p mhfpacket.MHFPacket) {
data, err := os.ReadFile(filepath.Join(s.server.erupeConfig.BinPath, fmt.Sprintf("quests/%s.bin", pkt.Filename)))
if err != nil {
s.logger.Error(fmt.Sprintf("Failed to open file: %s/quests/%s.bin", s.server.erupeConfig.BinPath, pkt.Filename))
s.logger.Error("Failed to open quest file", zap.String("binPath", s.server.erupeConfig.BinPath), zap.String("filename", pkt.Filename))
doAckBufFail(s, pkt.AckHandle, nil)
return
}

View File

@@ -58,7 +58,7 @@ func handleMsgSysLogin(s *Session, p mhfpacket.MHFPacket) {
if !s.server.erupeConfig.DebugOptions.DisableTokenCheck {
if err := s.server.sessionRepo.ValidateLoginToken(pkt.LoginTokenString, pkt.LoginTokenNumber, pkt.CharID0); err != nil {
_ = s.rawConn.Close()
s.logger.Warn(fmt.Sprintf("Invalid login token, offending CID: (%d)", pkt.CharID0))
s.logger.Warn("Invalid login token", zap.Uint32("charID", pkt.CharID0))
return
}
}