From 0ea399f1356e26d8a3f1d7d861ffa43bb9f65f48 Mon Sep 17 00:00:00 2001 From: Houmgaor Date: Sat, 21 Mar 2026 19:38:16 +0100 Subject: [PATCH 1/7] feat(config): add DisableSaveIntegrityCheck flag for save transfers The SHA-256 integrity check introduced in migration 0007 blocks saves when a character's savedata blob is imported from another server instance, because the stored hash in the target DB no longer matches the new blob. Adding DisableSaveIntegrityCheck (default: false) lets server operators bypass the check to unblock cross-server save transfers. A warning is logged each time the check is skipped so the flag's use is auditable. Documents the per-character SQL alternative in CHANGELOG: UPDATE characters SET savedata_hash = NULL WHERE id = <id>. Closes #183. --- CHANGELOG.md | 3 + config.reference.json | 1 + config/config.go | 3 +- config/config_load_test.go | 3 +- config/config_test.go | 3 +- server/channelserver/handlers_character.go | 7 +- .../channelserver/handlers_character_test.go | 65 +++++++++++++++++++ server/channelserver/repo_mocks_test.go | 3 +- 8 files changed, 83 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16453b03a..147288881 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added +- `DisableSaveIntegrityCheck` config flag: when `true`, the SHA-256 savedata integrity check is skipped on load. Intended for cross-server save transfers where the stored hash in the database does not match the imported save blob. Defaults to `false`. Affected characters can alternatively be unblocked per-character with `UPDATE characters SET savedata_hash = NULL WHERE id = `. 
+ ## [9.3.0] - 2026-03-19 ### Fixed diff --git a/config.reference.json b/config.reference.json index 0e1270e88..678370d6f 100644 --- a/config.reference.json +++ b/config.reference.json @@ -17,6 +17,7 @@ "UploadQuality":100 }, "DeleteOnSaveCorruption": false, + "DisableSaveIntegrityCheck": false, "ClientMode": "ZZ", "QuestCacheExpiry": 300, "CommandPrefix": "!", diff --git a/config/config.go b/config/config.go index 8555fe701..b731b45b0 100644 --- a/config/config.go +++ b/config/config.go @@ -72,7 +72,8 @@ type Config struct { LoginNotices []string // MHFML string of the login notices displayed PatchServerManifest string // Manifest patch server override PatchServerFile string // File patch server override - DeleteOnSaveCorruption bool // Attempts to save corrupted data will flag the save for deletion + DeleteOnSaveCorruption bool // Attempts to save corrupted data will flag the save for deletion + DisableSaveIntegrityCheck bool // Skip SHA-256 hash verification on load (needed for cross-server save transfers) ClientMode string RealClientMode Mode QuestCacheExpiry int // Number of seconds to keep quest data cached diff --git a/config/config_load_test.go b/config/config_load_test.go index d19359edc..e88e3bf7a 100644 --- a/config/config_load_test.go +++ b/config/config_load_test.go @@ -150,7 +150,8 @@ func TestConfigStruct(t *testing.T) { LoginNotices: []string{"Welcome"}, PatchServerManifest: "http://patch.example.com/manifest", PatchServerFile: "http://patch.example.com/files", - DeleteOnSaveCorruption: false, + DeleteOnSaveCorruption: false, + DisableSaveIntegrityCheck: false, ClientMode: "ZZ", RealClientMode: ZZ, QuestCacheExpiry: 3600, diff --git a/config/config_test.go b/config/config_test.go index 6fc66f06b..65951fd6c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -157,7 +157,8 @@ func TestConfigStructTypes(t *testing.T) { LoginNotices: []string{"Notice"}, PatchServerManifest: "http://patch.example.com", PatchServerFile: 
"http://files.example.com", - DeleteOnSaveCorruption: false, + DeleteOnSaveCorruption: false, + DisableSaveIntegrityCheck: false, ClientMode: "ZZ", RealClientMode: ZZ, QuestCacheExpiry: 3600, diff --git a/server/channelserver/handlers_character.go b/server/channelserver/handlers_character.go index b8a84bfca..3fc8fb60b 100644 --- a/server/channelserver/handlers_character.go +++ b/server/channelserver/handlers_character.go @@ -55,7 +55,8 @@ func GetCharacterSaveData(s *Session, charID uint32) (*CharacterSaveData, error) // Verify integrity checksum if one was stored with this save. // A nil hash means the character was saved before checksums were introduced, // so we skip verification (the next save will compute and store the hash). - if storedHash != nil { + // DisableSaveIntegrityCheck bypasses this entirely for cross-server save transfers. + if storedHash != nil && !s.server.erupeConfig.DisableSaveIntegrityCheck { computedHash := sha256.Sum256(saveData.decompSave) if !bytes.Equal(storedHash, computedHash[:]) { s.logger.Error("Savedata integrity check failed: hash mismatch", @@ -66,6 +67,10 @@ func GetCharacterSaveData(s *Session, charID uint32) (*CharacterSaveData, error) // TODO: attempt recovery from savedata_backups here return nil, errors.New("savedata integrity check failed") } + } else if storedHash != nil && s.server.erupeConfig.DisableSaveIntegrityCheck { + s.logger.Warn("Savedata integrity check skipped (DisableSaveIntegrityCheck=true)", + zap.Uint32("charID", charID), + ) } saveData.updateStructWithSaveData() diff --git a/server/channelserver/handlers_character_test.go b/server/channelserver/handlers_character_test.go index 40047c7ae..102647b34 100644 --- a/server/channelserver/handlers_character_test.go +++ b/server/channelserver/handlers_character_test.go @@ -747,3 +747,68 @@ func TestGetCharacterSaveData_ConfigMode(t *testing.T) { }) } } + +// TestGetCharacterSaveData_IntegrityCheck verifies the SHA-256 hash guard and +// that 
DisableSaveIntegrityCheck bypasses it without returning an error. +func TestGetCharacterSaveData_IntegrityCheck(t *testing.T) { + // Build a minimal valid savedata blob and compress it. + rawSave := make([]byte, 150000) + copy(rawSave[88:], []byte("TestChar\x00")) + compressed, err := nullcomp.Compress(rawSave) + if err != nil { + t.Fatalf("compress: %v", err) + } + + // A hash that deliberately does NOT match rawSave. + wrongHash := bytes.Repeat([]byte{0xDE}, 32) + + tests := []struct { + name string + disable bool + hash []byte + wantErr bool + }{ + { + name: "nil hash skips check", + disable: false, + hash: nil, + wantErr: false, + }, + { + name: "mismatched hash fails when check enabled", + disable: false, + hash: wrongHash, + wantErr: true, + }, + { + name: "mismatched hash passes when check disabled", + disable: true, + hash: wrongHash, + wantErr: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mock := newMockCharacterRepo() + mock.loadSaveDataID = 1 + mock.loadSaveDataData = compressed + mock.loadSaveDataName = "TestChar" + mock.loadSaveDataHash = tc.hash + + server := createMockServer() + server.erupeConfig.RealClientMode = cfg.ZZ + server.erupeConfig.DisableSaveIntegrityCheck = tc.disable + server.charRepo = mock + session := createMockSession(1, server) + + _, err := GetCharacterSaveData(session, 1) + if tc.wantErr && err == nil { + t.Error("expected error, got nil") + } + if !tc.wantErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} diff --git a/server/channelserver/repo_mocks_test.go b/server/channelserver/repo_mocks_test.go index dbd527669..1291c5a74 100644 --- a/server/channelserver/repo_mocks_test.go +++ b/server/channelserver/repo_mocks_test.go @@ -133,6 +133,7 @@ type mockCharacterRepo struct { loadSaveDataData []byte loadSaveDataNew bool loadSaveDataName string + loadSaveDataHash []byte loadSaveDataErr error // ReadEtcPoints mock fields @@ -245,7 +246,7 @@ func (m *mockCharacterRepo) 
SaveBackup(_ uint32, _ int, _ []byte) error { func (m *mockCharacterRepo) GetLastBackupTime(_ uint32) (time.Time, error) { return time.Time{}, nil } func (m *mockCharacterRepo) SaveCharacterDataAtomic(_ SaveAtomicParams) error { return nil } func (m *mockCharacterRepo) LoadSaveDataWithHash(_ uint32) (uint32, []byte, bool, string, []byte, error) { - return m.loadSaveDataID, m.loadSaveDataData, m.loadSaveDataNew, m.loadSaveDataName, nil, m.loadSaveDataErr + return m.loadSaveDataID, m.loadSaveDataData, m.loadSaveDataNew, m.loadSaveDataName, m.loadSaveDataHash, m.loadSaveDataErr } // --- mockGoocooRepo --- From 3803fd431b9253667cad6b2f1d6eb42585f98449 Mon Sep 17 00:00:00 2001 From: Houmgaor Date: Mon, 23 Mar 2026 10:13:00 +0100 Subject: [PATCH 2/7] chore(release): prepare 9.3.1 --- CHANGELOG.md | 2 ++ main.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 147288881..3b2fbae63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [9.3.1] - 2026-03-23 + ### Added - `DisableSaveIntegrityCheck` config flag: when `true`, the SHA-256 savedata integrity check is skipped on load. Intended for cross-server save transfers where the stored hash in the database does not match the imported save blob. Defaults to `false`. Affected characters can alternatively be unblocked per-character with `UPDATE characters SET savedata_hash = NULL WHERE id = `. 
diff --git a/main.go b/main.go index 0aee2a227..18e80737d 100644 --- a/main.go +++ b/main.go @@ -106,7 +106,7 @@ func main() { } } - logger.Info(fmt.Sprintf("Starting Erupe (9.3.0-%s)", Commit())) + logger.Info(fmt.Sprintf("Starting Erupe (9.3.1-%s)", Commit())) logger.Info(fmt.Sprintf("Client Mode: %s (%d)", config.ClientMode, config.RealClientMode)) if config.Database.Password == "" { From 1f0544fd10b6b4eef2fad02886a46f432ebf358d Mon Sep 17 00:00:00 2001 From: Houmgaor Date: Mon, 23 Mar 2026 10:15:25 +0100 Subject: [PATCH 3/7] fix(savedata): add recovery hint to integrity check failure log Admins importing saves from another server instance will hit the hash mismatch error with no guidance. The log message now tells them to set DisableSaveIntegrityCheck=true or null the hash for the specific character via SQL. --- server/channelserver/handlers_character.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/channelserver/handlers_character.go b/server/channelserver/handlers_character.go index 3fc8fb60b..429dc1a23 100644 --- a/server/channelserver/handlers_character.go +++ b/server/channelserver/handlers_character.go @@ -59,7 +59,9 @@ func GetCharacterSaveData(s *Session, charID uint32) (*CharacterSaveData, error) if storedHash != nil && !s.server.erupeConfig.DisableSaveIntegrityCheck { computedHash := sha256.Sum256(saveData.decompSave) if !bytes.Equal(storedHash, computedHash[:]) { - s.logger.Error("Savedata integrity check failed: hash mismatch", + s.logger.Error("Savedata integrity check failed: hash mismatch — "+ + "if this character was imported from another server, set DisableSaveIntegrityCheck=true in config.json "+ + "or run: UPDATE characters SET savedata_hash = NULL WHERE id = ", zap.Uint32("charID", charID), zap.Binary("stored_hash", storedHash), zap.Binary("computed_hash", computedHash[:]), From 635b9890c8a94fc89fb1bd64ca694877e40ef6b7 Mon Sep 17 00:00:00 2001 From: Houmgaor Date: Mon, 23 Mar 2026 10:26:29 +0100 Subject: [PATCH 
4/7] test(broadcast): fix flaky TestBroadcastMHFAllSessions under race detector The fixed 100ms sleep was too short for sendLoop goroutines to drain under the race detector's scheduling overhead, causing intermittent count=4/want=5 failures. Replace with a 2s polling loop that exits as soon as all sessions report delivery. --- server/channelserver/sys_channel_server_test.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/server/channelserver/sys_channel_server_test.go b/server/channelserver/sys_channel_server_test.go index cc38bf55e..0b90d3a9d 100644 --- a/server/channelserver/sys_channel_server_test.go +++ b/server/channelserver/sys_channel_server_test.go @@ -278,13 +278,26 @@ func TestBroadcastMHFAllSessions(t *testing.T) { testPkt := &mhfpacket.MsgSysNop{} server.BroadcastMHF(testPkt, nil) - time.Sleep(100 * time.Millisecond) + // Poll until all sessions have received the packet or the deadline is reached. + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + receivedCount := 0 + for _, sess := range server.sessions { + mock := sess.cryptConn.(*MockCryptConn) + if mock.PacketCount() > 0 { + receivedCount++ + } + } + if receivedCount == sessionCount { + break + } + time.Sleep(10 * time.Millisecond) + } // Stop all sessions for _, sess := range sessions { sess.closed.Store(true) } - time.Sleep(50 * time.Millisecond) // Verify all sessions received the packet receivedCount := 0 From 0c6dc393719d9c5f813ef585e940df07267f8008 Mon Sep 17 00:00:00 2001 From: Houmgaor Date: Mon, 23 Mar 2026 10:36:31 +0100 Subject: [PATCH 5/7] ci: upgrade actions to Node.js 24-compatible versions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Node.js 20 actions are deprecated and will be forced to Node.js 24 starting June 2, 2026. 
Bump to versions that ship Node.js 24 runtimes: actions/checkout v4 → v6 actions/setup-go v5 → v6 golangci-lint-action v7 → v9 actions/upload-artifact v4 → v6 actions/download-artifact v4 → v8 --- .github/workflows/docker.yml | 2 +- .github/workflows/go.yml | 18 +++++++++--------- .github/workflows/release.yml | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1f2fc99b3..9b4e4dd75 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -20,7 +20,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Log in to the Container registry uses: docker/login-action@v3 diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 9749509ba..7d953a997 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -46,10 +46,10 @@ jobs: --mount type=tmpfs,destination=/var/lib/postgresql/data steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up Go - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: go-version: '1.25' @@ -80,10 +80,10 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up Go - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: go-version: '1.25' @@ -94,7 +94,7 @@ jobs: run: env GOOS=linux GOARCH=amd64 go build -v - name: Upload Linux-amd64 artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: Linux-amd64 path: | @@ -109,7 +109,7 @@ jobs: run: env GOOS=windows GOARCH=amd64 go build -v - name: Upload Windows-amd64 artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: Windows-amd64 path: | @@ -125,15 +125,15 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up Go - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: 
go-version: '1.25' - name: Run golangci-lint - uses: golangci/golangci-lint-action@v7 + uses: golangci/golangci-lint-action@v9 with: version: v2.10.1 args: --timeout=5m diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ca8ed42ad..b942e5cef 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -32,10 +32,10 @@ jobs: binary: erupe-ce.exe steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up Go - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: go-version: '1.25' @@ -56,7 +56,7 @@ jobs: cd staging && zip -r ../erupe-${{ matrix.os_name }}.zip . - name: Upload build artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: ${{ matrix.os_name }} path: erupe-${{ matrix.os_name }}.zip @@ -68,7 +68,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Log in to the Container registry uses: docker/login-action@v3 @@ -108,10 +108,10 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Download all artifacts - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: path: artifacts pattern: '*-amd64' From d0efc4e81aac439a1549649564816a1463bcc826 Mon Sep 17 00:00:00 2001 From: Houmgaor Date: Mon, 23 Mar 2026 10:57:01 +0100 Subject: [PATCH 6/7] test(channelserver): replace time.Sleep with polling loops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Blind sleeps accumulate serially (no t.Parallel anywhere) and inflate under the race detector's scheduling overhead — contributing to the ~136s channelserver test run time. Replace ~75 arbitrary sleeps (50ms–1s) across 7 test files with 2s polling loops that exit as soon as the expected condition holds. 
Sleeps that are genuinely intentional (race-condition stress tests, cache expiry, temporal spacing in timestamp tests, backpressure pacing) are left untouched. --- CHANGELOG.md | 6 +- .../client_connection_simulation_test.go | 13 --- .../handlers_rengoku_integration_test.go | 2 - .../handlers_savedata_integration_test.go | 4 - server/channelserver/integration_test.go | 109 +++++++++++++++--- .../savedata_lifecycle_monitoring_test.go | 9 -- .../session_lifecycle_integration_test.go | 19 --- server/channelserver/sys_session_test.go | 72 ++++++++++-- 8 files changed, 159 insertions(+), 75 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b2fbae63..8bda44230 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [9.3.1] - 2026-03-23 ### Added -- `DisableSaveIntegrityCheck` config flag: when `true`, the SHA-256 savedata integrity check is skipped on load. Intended for cross-server save transfers where the stored hash in the database does not match the imported save blob. Defaults to `false`. Affected characters can alternatively be unblocked per-character with `UPDATE characters SET savedata_hash = NULL WHERE id = `. + +- `DisableSaveIntegrityCheck` config flag: when `true`, the SHA-256 savedata integrity check is skipped on load. +Intended for cross-server save transfers where the stored hash in the database does not match the imported save blob. +Defaults to `false`. +Affected characters can alternatively be unblocked per-character with `UPDATE characters SET savedata_hash = NULL WHERE id = `. 
## [9.3.0] - 2026-03-19 diff --git a/server/channelserver/client_connection_simulation_test.go b/server/channelserver/client_connection_simulation_test.go index 1bb69de2a..e4d8d96b0 100644 --- a/server/channelserver/client_connection_simulation_test.go +++ b/server/channelserver/client_connection_simulation_test.go @@ -153,13 +153,11 @@ func TestClientConnection_GracefulLoginLogout(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session, savePkt) - time.Sleep(100 * time.Millisecond) // Client sends logout packet (graceful) t.Log("Client sending logout packet") logoutPkt := &mhfpacket.MsgSysLogout{} handleMsgSysLogout(session, logoutPkt) - time.Sleep(100 * time.Millisecond) // Verify connection closed if !mockConn.IsClosed() { @@ -220,13 +218,11 @@ func TestClientConnection_UngracefulDisconnect(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session, savePkt) - time.Sleep(100 * time.Millisecond) // Simulate network failure - connection drops without logout packet t.Log("Simulating network failure (no logout packet sent)") // In real scenario, recvLoop would detect io.EOF and call logoutPlayer logoutPlayer(session) - time.Sleep(100 * time.Millisecond) // Verify data was saved despite ungraceful disconnect var savedCompressed []byte @@ -274,7 +270,6 @@ func TestClientConnection_SessionTimeout(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session, savePkt) - time.Sleep(100 * time.Millisecond) // Simulate timeout by setting lastPacket to long ago session.lastPacket = time.Now().Add(-35 * time.Second) @@ -283,7 +278,6 @@ func TestClientConnection_SessionTimeout(t *testing.T) { // and call logoutPlayer(session) t.Log("Session timed out (>30s since last packet)") logoutPlayer(session) - time.Sleep(100 * time.Millisecond) // Verify data saved var savedCompressed []byte @@ -346,11 +340,9 @@ func TestClientConnection_MultipleClientsSimultaneous(t *testing.T) { RawDataPayload: compressed, } 
handleMsgMhfSavedata(session, savePkt) - time.Sleep(50 * time.Millisecond) // Graceful logout logoutPlayer(session) - time.Sleep(50 * time.Millisecond) // Verify individual client's data var savedCompressed []byte @@ -416,12 +408,10 @@ func TestClientConnection_SaveDuringCombat(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session, savePkt) - time.Sleep(100 * time.Millisecond) // Disconnect while in stage t.Log("Player disconnects during quest") logoutPlayer(session) - time.Sleep(100 * time.Millisecond) // Verify data saved even during combat var savedCompressed []byte @@ -474,12 +464,10 @@ func TestClientConnection_ReconnectAfterCrash(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session1, savePkt) - time.Sleep(50 * time.Millisecond) // Client crashes (ungraceful disconnect) t.Log("Client crashes (no logout packet)") logoutPlayer(session1) - time.Sleep(100 * time.Millisecond) // Client reconnects immediately t.Log("Client reconnects after crash") @@ -492,7 +480,6 @@ func TestClientConnection_ReconnectAfterCrash(t *testing.T) { AckHandle: 18001, } handleMsgMhfLoaddata(session2, loadPkt) - time.Sleep(50 * time.Millisecond) // Verify data from before crash var savedCompressed []byte diff --git a/server/channelserver/handlers_rengoku_integration_test.go b/server/channelserver/handlers_rengoku_integration_test.go index 390665f63..742170ae4 100644 --- a/server/channelserver/handlers_rengoku_integration_test.go +++ b/server/channelserver/handlers_rengoku_integration_test.go @@ -229,7 +229,6 @@ func TestRengokuData_SaveLoadRoundTrip_AcrossSessions(t *testing.T) { // Logout session 1 logoutPlayer(session1) - time.Sleep(100 * time.Millisecond) // === SESSION 2: Load data in new session === session2 := createTestSessionForServerWithChar(server, charID, "RengokuChar2") @@ -348,7 +347,6 @@ func TestRengokuData_SkillRegionPreserved(t *testing.T) { handleMsgMhfSaveRengokuData(session1, savePkt) drainAck(t, session1) 
logoutPlayer(session1) - time.Sleep(100 * time.Millisecond) // === SESSION 2: Load and verify skill region === session2 := createTestSessionForServerWithChar(server, charID, "SkillChar") diff --git a/server/channelserver/handlers_savedata_integration_test.go b/server/channelserver/handlers_savedata_integration_test.go index 4c3e9aab0..28930a5fd 100644 --- a/server/channelserver/handlers_savedata_integration_test.go +++ b/server/channelserver/handlers_savedata_integration_test.go @@ -3,7 +3,6 @@ package channelserver import ( "bytes" "testing" - "time" "erupe-ce/common/mhfitem" cfg "erupe-ce/config" @@ -617,9 +616,6 @@ func TestPlateDataPersistenceDuringLogout(t *testing.T) { t.Log("Triggering logout via logoutPlayer") logoutPlayer(session) - // Give logout time to complete - time.Sleep(100 * time.Millisecond) - // ===== VERIFICATION: Check all plate data was saved ===== t.Log("--- Verifying plate data persisted ---") diff --git a/server/channelserver/integration_test.go b/server/channelserver/integration_test.go index 3db93c58e..3b5746316 100644 --- a/server/channelserver/integration_test.go +++ b/server/channelserver/integration_test.go @@ -84,7 +84,13 @@ func IntegrationTest_PacketQueueFlow(t *testing.T) { done: s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= tt.wantPackets { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != tt.wantPackets { @@ -175,7 +181,13 @@ func IntegrationTest_ConcurrentQueueing(t *testing.T) { done: s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= expectedTotal { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != expectedTotal { @@ -237,9 +249,14 @@ func IntegrationTest_AckPacketFlow(t 
*testing.T) { } // Wait for ACKs to be sent - time.Sleep(200 * time.Millisecond) s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= ackCount { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != ackCount { @@ -307,9 +324,14 @@ func IntegrationTest_MixedPacketTypes(t *testing.T) { s.QueueSendNonBlocking([]byte{0x00, 0x03, 0xEE}) // Wait for all packets - time.Sleep(200 * time.Millisecond) s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 4 { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != 4 { @@ -357,9 +379,14 @@ func IntegrationTest_PacketOrderPreservation(t *testing.T) { } // Wait for packets - time.Sleep(300 * time.Millisecond) s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= packetCount { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != packetCount { @@ -423,9 +450,14 @@ func IntegrationTest_QueueBackpressure(t *testing.T) { } // Wait for processing - time.Sleep(1 * time.Second) s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } // Some packets should have been sent sentCount := mock.PacketCount() @@ -502,7 +534,13 @@ func IntegrationTest_GuildEnumerationFlow(t *testing.T) { done: s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= tt.guildCount { + break + } 
+ time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != tt.guildCount { @@ -571,9 +609,21 @@ func IntegrationTest_ConcurrentClientAccess(t *testing.T) { s.QueueSend(testData) } - time.Sleep(100 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= tt.packetsPerClient { + break + } + time.Sleep(10 * time.Millisecond) + } s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline = time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= tt.packetsPerClient { + break + } + time.Sleep(10 * time.Millisecond) + } sentCount := mock.PacketCount() mu.Lock() @@ -638,9 +688,21 @@ func IntegrationTest_ClientVersionCompatibility(t *testing.T) { testData := []byte{0x00, 0x01, 0xAA, 0xBB} s.QueueSend(testData) - time.Sleep(100 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 1 { + break + } + time.Sleep(10 * time.Millisecond) + } s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline = time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 1 { + break + } + time.Sleep(10 * time.Millisecond) + } sentCount := mock.PacketCount() if (sentCount > 0) != tt.shouldSucceed { @@ -674,9 +736,14 @@ func IntegrationTest_PacketPrioritization(t *testing.T) { s.QueueSend([]byte{0x00, byte(i), 0xDD}) } - time.Sleep(200 * time.Millisecond) s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 10 { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) < 10 { @@ -732,7 +799,13 @@ func IntegrationTest_DataIntegrityUnderLoad(t *testing.T) { done: s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline := time.Now().Add(2 * 
time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= packetCount { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != packetCount { diff --git a/server/channelserver/savedata_lifecycle_monitoring_test.go b/server/channelserver/savedata_lifecycle_monitoring_test.go index a6af41610..2f91a4656 100644 --- a/server/channelserver/savedata_lifecycle_monitoring_test.go +++ b/server/channelserver/savedata_lifecycle_monitoring_test.go @@ -155,13 +155,11 @@ func TestMonitored_SaveHandlerInvocationDuringLogout(t *testing.T) { t.Log("Calling handleMsgMhfSavedata during session") handleMsgMhfSavedata(session, savePkt) monitor.RecordSavedata() - time.Sleep(100 * time.Millisecond) // Now trigger logout t.Log("Triggering logout - monitoring if save handlers are called") monitor.RecordLogout() logoutPlayer(session) - time.Sleep(100 * time.Millisecond) // Report statistics t.Log(monitor.GetStats()) @@ -233,12 +231,10 @@ func TestWithLogging_LogoutFlowAnalysis(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session, savePkt) - time.Sleep(50 * time.Millisecond) // Trigger logout t.Log("Triggering logout with logging enabled") logoutPlayer(session) - time.Sleep(100 * time.Millisecond) // Analyze logs allLogs := logs.All() @@ -317,11 +313,9 @@ func TestConcurrent_MultipleSessionsSaving(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session, savePkt) - time.Sleep(50 * time.Millisecond) // Logout logoutPlayer(session) - time.Sleep(50 * time.Millisecond) // Verify data saved var savedCompressed []byte @@ -376,11 +370,9 @@ func TestSequential_RepeatedLogoutLoginCycles(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session, savePkt) - time.Sleep(50 * time.Millisecond) // Logout logoutPlayer(session) - time.Sleep(50 * time.Millisecond) // Verify data after each cycle var savedCompressed []byte @@ -452,7 +444,6 @@ func TestRealtime_SaveDataTimestamps(t 
*testing.T) { events = append(events, SaveEvent{time.Now(), "logout_start"}) logoutPlayer(session) events = append(events, SaveEvent{time.Now(), "logout_end"}) - time.Sleep(50 * time.Millisecond) // Print timeline t.Log("Save event timeline:") diff --git a/server/channelserver/session_lifecycle_integration_test.go b/server/channelserver/session_lifecycle_integration_test.go index f00f6864f..7884ec8d1 100644 --- a/server/channelserver/session_lifecycle_integration_test.go +++ b/server/channelserver/session_lifecycle_integration_test.go @@ -84,16 +84,10 @@ func TestSessionLifecycle_BasicSaveLoadCycle(t *testing.T) { t.Log("Sending savedata packet") handleMsgMhfSavedata(session1, savePkt) - // Drain ACK - time.Sleep(100 * time.Millisecond) - // Now trigger logout via the actual logout flow t.Log("Triggering logout via logoutPlayer") logoutPlayer(session1) - // Give logout time to complete - time.Sleep(100 * time.Millisecond) - // ===== SESSION 2: Login again and verify data ===== t.Log("--- Starting Session 2: Login and verify data persists ---") @@ -106,8 +100,6 @@ func TestSessionLifecycle_BasicSaveLoadCycle(t *testing.T) { } handleMsgMhfLoaddata(session2, loadPkt) - time.Sleep(50 * time.Millisecond) - // Verify savedata persisted var savedCompressed []byte err = db.QueryRow("SELECT savedata FROM characters WHERE id = $1", charID).Scan(&savedCompressed) @@ -189,7 +181,6 @@ func TestSessionLifecycle_WarehouseDataPersistence(t *testing.T) { // Logout logoutPlayer(session1) - time.Sleep(100 * time.Millisecond) // ===== SESSION 2: Verify warehouse contents ===== session2 := createTestSessionForServerWithChar(server, charID, "WarehouseChar") @@ -240,7 +231,6 @@ func TestSessionLifecycle_KoryoPointsPersistence(t *testing.T) { t.Logf("Adding %d Koryo points", addPoints) handleMsgMhfAddKouryouPoint(session1, pkt) - time.Sleep(50 * time.Millisecond) // Verify points were added in session 1 var points1 uint32 @@ -252,7 +242,6 @@ func 
TestSessionLifecycle_KoryoPointsPersistence(t *testing.T) { // Logout logoutPlayer(session1) - time.Sleep(100 * time.Millisecond) // ===== SESSION 2: Verify Koryo points persist ===== session2 := createTestSessionForServerWithChar(server, charID, "KoryoChar") @@ -341,14 +330,10 @@ func TestSessionLifecycle_MultipleDataTypesPersistence(t *testing.T) { } handleMsgMhfSavedata(session1, savePkt) - // Give handlers time to process - time.Sleep(100 * time.Millisecond) - t.Log("Modified all data types in session 1") // Logout logoutPlayer(session1) - time.Sleep(100 * time.Millisecond) // ===== SESSION 2: Verify all data persists ===== session2 := createTestSessionForServerWithChar(server, charID, "MultiChar") @@ -358,7 +343,6 @@ func TestSessionLifecycle_MultipleDataTypesPersistence(t *testing.T) { AckHandle: 5001, } handleMsgMhfLoaddata(session2, loadPkt) - time.Sleep(50 * time.Millisecond) allPassed := true @@ -472,13 +456,11 @@ func TestSessionLifecycle_DisconnectWithoutLogout(t *testing.T) { RawDataPayload: compressed, } handleMsgMhfSavedata(session1, savePkt) - time.Sleep(100 * time.Millisecond) // Simulate disconnect by calling logoutPlayer (which is called by recvLoop on EOF) // In real scenario, this is triggered by connection close t.Log("Simulating ungraceful disconnect") logoutPlayer(session1) - time.Sleep(100 * time.Millisecond) // ===== SESSION 2: Verify data saved despite ungraceful disconnect ===== session2 := createTestSessionForServerWithChar(server, charID, "DisconnectChar") @@ -544,7 +526,6 @@ func TestSessionLifecycle_RapidReconnect(t *testing.T) { // Logout quickly logoutPlayer(session) - time.Sleep(30 * time.Millisecond) // Verify points persisted var loadedPoints uint32 diff --git a/server/channelserver/sys_session_test.go b/server/channelserver/sys_session_test.go index 9a5ba7ac2..270ff67b4 100644 --- a/server/channelserver/sys_session_test.go +++ b/server/channelserver/sys_session_test.go @@ -116,11 +116,23 @@ func 
TestPacketQueueIndividualSending(t *testing.T) { } // Wait for packets to be processed - time.Sleep(100 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= tt.wantPackets { + break + } + time.Sleep(10 * time.Millisecond) + } // Stop the session s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline = time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= tt.wantPackets { + break + } + time.Sleep(10 * time.Millisecond) + } // Verify packet count sentPackets := mock.GetSentPackets() @@ -165,9 +177,21 @@ func TestPacketQueueNoConcatenation(t *testing.T) { s.sendPackets <- packet{packet2, true} s.sendPackets <- packet{packet3, true} - time.Sleep(100 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 3 { + break + } + time.Sleep(10 * time.Millisecond) + } s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline = time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 3 { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() @@ -218,7 +242,13 @@ func TestQueueSendUsesQueue(t *testing.T) { // Now start sendLoop and verify it gets sent go s.sendLoop() - time.Sleep(100 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 1 { + break + } + time.Sleep(10 * time.Millisecond) + } if mock.PacketCount() != 1 { t.Errorf("expected 1 packet sent after sendLoop, got %d", mock.PacketCount()) @@ -237,9 +267,21 @@ func TestPacketTerminatorFormat(t *testing.T) { testData := []byte{0x00, 0x01, 0xAA, 0xBB} s.sendPackets <- packet{testData, true} - time.Sleep(100 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 1 { + break + } + time.Sleep(10 * 
time.Millisecond) + } s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline = time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 1 { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != 1 { @@ -313,9 +355,21 @@ func TestPacketQueueAckFormat(t *testing.T) { ackData := []byte{0xAA, 0xBB, 0xCC, 0xDD} s.QueueAck(ackHandle, ackData) - time.Sleep(100 * time.Millisecond) + deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 1 { + break + } + time.Sleep(10 * time.Millisecond) + } s.closed.Store(true) - time.Sleep(50 * time.Millisecond) + deadline = time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if mock.PacketCount() >= 1 { + break + } + time.Sleep(10 * time.Millisecond) + } sentPackets := mock.GetSentPackets() if len(sentPackets) != 1 { From e1aa863a1ff257c1257f1b907f7f640672a996e2 Mon Sep 17 00:00:00 2001 From: Houmgaor Date: Mon, 23 Mar 2026 11:06:50 +0100 Subject: [PATCH 7/7] test(channelserver): reduce polling interval from 10ms to 1ms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LoopDelay is 0 in test servers (bare struct, no Viper defaults), so sendLoop spins without delay and satisfies conditions in <1ms. The 10ms poll interval was the new bottleneck — dropping to 1ms cuts the channelserver test suite from ~136s to ~5s. 
--- server/channelserver/integration_test.go | 26 +++++++++---------- .../channelserver/sys_channel_server_test.go | 2 +- server/channelserver/sys_session_test.go | 20 +++++++------- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/server/channelserver/integration_test.go b/server/channelserver/integration_test.go index 3b5746316..fb22c6e24 100644 --- a/server/channelserver/integration_test.go +++ b/server/channelserver/integration_test.go @@ -89,7 +89,7 @@ func IntegrationTest_PacketQueueFlow(t *testing.T) { if mock.PacketCount() >= tt.wantPackets { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -186,7 +186,7 @@ done: if mock.PacketCount() >= expectedTotal { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -255,7 +255,7 @@ func IntegrationTest_AckPacketFlow(t *testing.T) { if mock.PacketCount() >= ackCount { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -330,7 +330,7 @@ func IntegrationTest_MixedPacketTypes(t *testing.T) { if mock.PacketCount() >= 4 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -385,7 +385,7 @@ func IntegrationTest_PacketOrderPreservation(t *testing.T) { if mock.PacketCount() >= packetCount { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -456,7 +456,7 @@ func IntegrationTest_QueueBackpressure(t *testing.T) { if mock.PacketCount() > 0 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } // Some packets should have been sent @@ -539,7 +539,7 @@ func IntegrationTest_GuildEnumerationFlow(t *testing.T) { if mock.PacketCount() >= tt.guildCount { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := 
mock.GetSentPackets() @@ -614,7 +614,7 @@ func IntegrationTest_ConcurrentClientAccess(t *testing.T) { if mock.PacketCount() >= tt.packetsPerClient { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } s.closed.Store(true) deadline = time.Now().Add(2 * time.Second) @@ -622,7 +622,7 @@ func IntegrationTest_ConcurrentClientAccess(t *testing.T) { if mock.PacketCount() >= tt.packetsPerClient { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentCount := mock.PacketCount() @@ -693,7 +693,7 @@ func IntegrationTest_ClientVersionCompatibility(t *testing.T) { if mock.PacketCount() >= 1 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } s.closed.Store(true) deadline = time.Now().Add(2 * time.Second) @@ -701,7 +701,7 @@ func IntegrationTest_ClientVersionCompatibility(t *testing.T) { if mock.PacketCount() >= 1 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentCount := mock.PacketCount() @@ -742,7 +742,7 @@ func IntegrationTest_PacketPrioritization(t *testing.T) { if mock.PacketCount() >= 10 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -804,7 +804,7 @@ done: if mock.PacketCount() >= packetCount { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() diff --git a/server/channelserver/sys_channel_server_test.go b/server/channelserver/sys_channel_server_test.go index 0b90d3a9d..f0a477d4f 100644 --- a/server/channelserver/sys_channel_server_test.go +++ b/server/channelserver/sys_channel_server_test.go @@ -291,7 +291,7 @@ func TestBroadcastMHFAllSessions(t *testing.T) { if receivedCount == sessionCount { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } // Stop all sessions diff --git a/server/channelserver/sys_session_test.go b/server/channelserver/sys_session_test.go index 
270ff67b4..10b26bdcc 100644 --- a/server/channelserver/sys_session_test.go +++ b/server/channelserver/sys_session_test.go @@ -121,7 +121,7 @@ func TestPacketQueueIndividualSending(t *testing.T) { if mock.PacketCount() >= tt.wantPackets { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } // Stop the session @@ -131,7 +131,7 @@ func TestPacketQueueIndividualSending(t *testing.T) { if mock.PacketCount() >= tt.wantPackets { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } // Verify packet count @@ -182,7 +182,7 @@ func TestPacketQueueNoConcatenation(t *testing.T) { if mock.PacketCount() >= 3 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } s.closed.Store(true) deadline = time.Now().Add(2 * time.Second) @@ -190,7 +190,7 @@ func TestPacketQueueNoConcatenation(t *testing.T) { if mock.PacketCount() >= 3 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -228,7 +228,7 @@ func TestQueueSendUsesQueue(t *testing.T) { s.QueueSend(testData) // Give it a moment - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) // WITHOUT sendLoop running, packets should NOT be sent yet if mock.PacketCount() > 0 { @@ -247,7 +247,7 @@ func TestQueueSendUsesQueue(t *testing.T) { if mock.PacketCount() >= 1 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } if mock.PacketCount() != 1 { @@ -272,7 +272,7 @@ func TestPacketTerminatorFormat(t *testing.T) { if mock.PacketCount() >= 1 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } s.closed.Store(true) deadline = time.Now().Add(2 * time.Second) @@ -280,7 +280,7 @@ func TestPacketTerminatorFormat(t *testing.T) { if mock.PacketCount() >= 1 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets() @@ -360,7 +360,7 @@ func 
TestPacketQueueAckFormat(t *testing.T) { if mock.PacketCount() >= 1 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } s.closed.Store(true) deadline = time.Now().Add(2 * time.Second) @@ -368,7 +368,7 @@ func TestPacketQueueAckFormat(t *testing.T) { if mock.PacketCount() >= 1 { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } sentPackets := mock.GetSentPackets()