test(channelserver): replace time.Sleep with polling loops

Blind sleeps accumulate serially (no t.Parallel anywhere) and inflate
under the race detector's scheduling overhead — contributing to the
~136s channelserver test run time.

Replace ~75 arbitrary sleeps (50ms–1s) across 7 test files with polling
loops (2s timeout) that exit as soon as the expected condition holds. Sleeps
that are genuinely intentional (race-condition stress tests, cache
expiry, temporal spacing in timestamp tests, backpressure pacing) are
left untouched.
This commit is contained in:
Houmgaor
2026-03-23 10:57:01 +01:00
parent 0c6dc39371
commit d0efc4e81a
8 changed files with 159 additions and 75 deletions

View File

@@ -84,16 +84,10 @@ func TestSessionLifecycle_BasicSaveLoadCycle(t *testing.T) {
t.Log("Sending savedata packet")
handleMsgMhfSavedata(session1, savePkt)
// Drain ACK
time.Sleep(100 * time.Millisecond)
// Now trigger logout via the actual logout flow
t.Log("Triggering logout via logoutPlayer")
logoutPlayer(session1)
// Give logout time to complete
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Login again and verify data =====
t.Log("--- Starting Session 2: Login and verify data persists ---")
@@ -106,8 +100,6 @@ func TestSessionLifecycle_BasicSaveLoadCycle(t *testing.T) {
}
handleMsgMhfLoaddata(session2, loadPkt)
time.Sleep(50 * time.Millisecond)
// Verify savedata persisted
var savedCompressed []byte
err = db.QueryRow("SELECT savedata FROM characters WHERE id = $1", charID).Scan(&savedCompressed)
@@ -189,7 +181,6 @@ func TestSessionLifecycle_WarehouseDataPersistence(t *testing.T) {
// Logout
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify warehouse contents =====
session2 := createTestSessionForServerWithChar(server, charID, "WarehouseChar")
@@ -240,7 +231,6 @@ func TestSessionLifecycle_KoryoPointsPersistence(t *testing.T) {
t.Logf("Adding %d Koryo points", addPoints)
handleMsgMhfAddKouryouPoint(session1, pkt)
time.Sleep(50 * time.Millisecond)
// Verify points were added in session 1
var points1 uint32
@@ -252,7 +242,6 @@ func TestSessionLifecycle_KoryoPointsPersistence(t *testing.T) {
// Logout
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify Koryo points persist =====
session2 := createTestSessionForServerWithChar(server, charID, "KoryoChar")
@@ -341,14 +330,10 @@ func TestSessionLifecycle_MultipleDataTypesPersistence(t *testing.T) {
}
handleMsgMhfSavedata(session1, savePkt)
// Give handlers time to process
time.Sleep(100 * time.Millisecond)
t.Log("Modified all data types in session 1")
// Logout
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify all data persists =====
session2 := createTestSessionForServerWithChar(server, charID, "MultiChar")
@@ -358,7 +343,6 @@ func TestSessionLifecycle_MultipleDataTypesPersistence(t *testing.T) {
AckHandle: 5001,
}
handleMsgMhfLoaddata(session2, loadPkt)
time.Sleep(50 * time.Millisecond)
allPassed := true
@@ -472,13 +456,11 @@ func TestSessionLifecycle_DisconnectWithoutLogout(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session1, savePkt)
time.Sleep(100 * time.Millisecond)
// Simulate disconnect by calling logoutPlayer (which is called by recvLoop on EOF)
// In real scenario, this is triggered by connection close
t.Log("Simulating ungraceful disconnect")
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify data saved despite ungraceful disconnect =====
session2 := createTestSessionForServerWithChar(server, charID, "DisconnectChar")
@@ -544,7 +526,6 @@ func TestSessionLifecycle_RapidReconnect(t *testing.T) {
// Logout quickly
logoutPlayer(session)
time.Sleep(30 * time.Millisecond)
// Verify points persisted
var loadedPoints uint32