chore(merge): merge main into develop

Resolves CHANGELOG.md conflict: preserve develop's [Unreleased] block,
insert the [9.3.1] section from main, remove the duplicate
DisableSaveIntegrityCheck entry that had been in [Unreleased].
This commit is contained in:
Houmgaor
2026-03-23 11:15:20 +01:00
20 changed files with 279 additions and 101 deletions

View File

@@ -153,13 +153,11 @@ func TestClientConnection_GracefulLoginLogout(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(100 * time.Millisecond)
// Client sends logout packet (graceful)
t.Log("Client sending logout packet")
logoutPkt := &mhfpacket.MsgSysLogout{}
handleMsgSysLogout(session, logoutPkt)
time.Sleep(100 * time.Millisecond)
// Verify connection closed
if !mockConn.IsClosed() {
@@ -220,13 +218,11 @@ func TestClientConnection_UngracefulDisconnect(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(100 * time.Millisecond)
// Simulate network failure - connection drops without logout packet
t.Log("Simulating network failure (no logout packet sent)")
// In real scenario, recvLoop would detect io.EOF and call logoutPlayer
logoutPlayer(session)
time.Sleep(100 * time.Millisecond)
// Verify data was saved despite ungraceful disconnect
var savedCompressed []byte
@@ -274,7 +270,6 @@ func TestClientConnection_SessionTimeout(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(100 * time.Millisecond)
// Simulate timeout by setting lastPacket to long ago
session.lastPacket = time.Now().Add(-35 * time.Second)
@@ -283,7 +278,6 @@ func TestClientConnection_SessionTimeout(t *testing.T) {
// and call logoutPlayer(session)
t.Log("Session timed out (>30s since last packet)")
logoutPlayer(session)
time.Sleep(100 * time.Millisecond)
// Verify data saved
var savedCompressed []byte
@@ -346,11 +340,9 @@ func TestClientConnection_MultipleClientsSimultaneous(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(50 * time.Millisecond)
// Graceful logout
logoutPlayer(session)
time.Sleep(50 * time.Millisecond)
// Verify individual client's data
var savedCompressed []byte
@@ -416,12 +408,10 @@ func TestClientConnection_SaveDuringCombat(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(100 * time.Millisecond)
// Disconnect while in stage
t.Log("Player disconnects during quest")
logoutPlayer(session)
time.Sleep(100 * time.Millisecond)
// Verify data saved even during combat
var savedCompressed []byte
@@ -474,12 +464,10 @@ func TestClientConnection_ReconnectAfterCrash(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session1, savePkt)
time.Sleep(50 * time.Millisecond)
// Client crashes (ungraceful disconnect)
t.Log("Client crashes (no logout packet)")
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// Client reconnects immediately
t.Log("Client reconnects after crash")
@@ -492,7 +480,6 @@ func TestClientConnection_ReconnectAfterCrash(t *testing.T) {
AckHandle: 18001,
}
handleMsgMhfLoaddata(session2, loadPkt)
time.Sleep(50 * time.Millisecond)
// Verify data from before crash
var savedCompressed []byte

View File

@@ -55,16 +55,23 @@ func GetCharacterSaveData(s *Session, charID uint32) (*CharacterSaveData, error)
// Verify integrity checksum if one was stored with this save.
// A nil hash means the character was saved before checksums were introduced,
// so we skip verification (the next save will compute and store the hash).
if storedHash != nil {
// DisableSaveIntegrityCheck bypasses this entirely for cross-server save transfers.
if storedHash != nil && !s.server.erupeConfig.DisableSaveIntegrityCheck {
computedHash := sha256.Sum256(saveData.decompSave)
if !bytes.Equal(storedHash, computedHash[:]) {
s.logger.Error("Savedata integrity check failed: hash mismatch",
s.logger.Error("Savedata integrity check failed: hash mismatch — "+
"if this character was imported from another server, set DisableSaveIntegrityCheck=true in config.json "+
"or run: UPDATE characters SET savedata_hash = NULL WHERE id = <charID>",
zap.Uint32("charID", charID),
zap.Binary("stored_hash", storedHash),
zap.Binary("computed_hash", computedHash[:]),
)
return recoverFromBackups(s, saveData, charID)
}
} else if storedHash != nil && s.server.erupeConfig.DisableSaveIntegrityCheck {
s.logger.Warn("Savedata integrity check skipped (DisableSaveIntegrityCheck=true)",
zap.Uint32("charID", charID),
)
}
saveData.updateStructWithSaveData()

View File

@@ -885,3 +885,68 @@ func TestGetCharacterSaveData_ConfigMode(t *testing.T) {
})
}
}
// TestGetCharacterSaveData_IntegrityCheck verifies the SHA-256 hash guard and
// that DisableSaveIntegrityCheck bypasses it without returning an error.
func TestGetCharacterSaveData_IntegrityCheck(t *testing.T) {
	// Prepare a minimal savedata blob and its compressed form.
	rawSave := make([]byte, 150000)
	copy(rawSave[88:], []byte("TestChar\x00"))
	compressed, err := nullcomp.Compress(rawSave)
	if err != nil {
		t.Fatalf("compress: %v", err)
	}
	// A checksum guaranteed not to match rawSave.
	badHash := bytes.Repeat([]byte{0xDE}, 32)

	cases := []struct {
		name    string
		disable bool
		hash    []byte
		wantErr bool
	}{
		{name: "nil hash skips check", disable: false, hash: nil, wantErr: false},
		{name: "mismatched hash fails when check enabled", disable: false, hash: badHash, wantErr: true},
		{name: "mismatched hash passes when check disabled", disable: true, hash: badHash, wantErr: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Wire the mock repository to return the fixture blob and the
			// per-case stored hash.
			repo := newMockCharacterRepo()
			repo.loadSaveDataID = 1
			repo.loadSaveDataData = compressed
			repo.loadSaveDataName = "TestChar"
			repo.loadSaveDataHash = tc.hash

			srv := createMockServer()
			srv.erupeConfig.RealClientMode = cfg.ZZ
			srv.erupeConfig.DisableSaveIntegrityCheck = tc.disable
			srv.charRepo = repo

			sess := createMockSession(1, srv)
			_, gotErr := GetCharacterSaveData(sess, 1)
			switch {
			case tc.wantErr && gotErr == nil:
				t.Error("expected error, got nil")
			case !tc.wantErr && gotErr != nil:
				t.Errorf("unexpected error: %v", gotErr)
			}
		})
	}
}

View File

@@ -229,7 +229,6 @@ func TestRengokuData_SaveLoadRoundTrip_AcrossSessions(t *testing.T) {
// Logout session 1
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// === SESSION 2: Load data in new session ===
session2 := createTestSessionForServerWithChar(server, charID, "RengokuChar2")
@@ -348,7 +347,6 @@ func TestRengokuData_SkillRegionPreserved(t *testing.T) {
handleMsgMhfSaveRengokuData(session1, savePkt)
drainAck(t, session1)
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// === SESSION 2: Load and verify skill region ===
session2 := createTestSessionForServerWithChar(server, charID, "SkillChar")

View File

@@ -3,7 +3,6 @@ package channelserver
import (
"bytes"
"testing"
"time"
"erupe-ce/common/mhfitem"
cfg "erupe-ce/config"
@@ -617,9 +616,6 @@ func TestPlateDataPersistenceDuringLogout(t *testing.T) {
t.Log("Triggering logout via logoutPlayer")
logoutPlayer(session)
// Give logout time to complete
time.Sleep(100 * time.Millisecond)
// ===== VERIFICATION: Check all plate data was saved =====
t.Log("--- Verifying plate data persisted ---")

View File

@@ -84,7 +84,13 @@ func IntegrationTest_PacketQueueFlow(t *testing.T) {
done:
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= tt.wantPackets {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != tt.wantPackets {
@@ -175,7 +181,13 @@ func IntegrationTest_ConcurrentQueueing(t *testing.T) {
done:
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= expectedTotal {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != expectedTotal {
@@ -237,9 +249,14 @@ func IntegrationTest_AckPacketFlow(t *testing.T) {
}
// Wait for ACKs to be sent
time.Sleep(200 * time.Millisecond)
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= ackCount {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != ackCount {
@@ -307,9 +324,14 @@ func IntegrationTest_MixedPacketTypes(t *testing.T) {
s.QueueSendNonBlocking([]byte{0x00, 0x03, 0xEE})
// Wait for all packets
time.Sleep(200 * time.Millisecond)
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 4 {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != 4 {
@@ -357,9 +379,14 @@ func IntegrationTest_PacketOrderPreservation(t *testing.T) {
}
// Wait for packets
time.Sleep(300 * time.Millisecond)
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= packetCount {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != packetCount {
@@ -423,9 +450,14 @@ func IntegrationTest_QueueBackpressure(t *testing.T) {
}
// Wait for processing
time.Sleep(1 * time.Second)
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() > 0 {
break
}
time.Sleep(1 * time.Millisecond)
}
// Some packets should have been sent
sentCount := mock.PacketCount()
@@ -502,7 +534,13 @@ func IntegrationTest_GuildEnumerationFlow(t *testing.T) {
done:
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= tt.guildCount {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != tt.guildCount {
@@ -571,9 +609,21 @@ func IntegrationTest_ConcurrentClientAccess(t *testing.T) {
s.QueueSend(testData)
}
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= tt.packetsPerClient {
break
}
time.Sleep(1 * time.Millisecond)
}
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline = time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= tt.packetsPerClient {
break
}
time.Sleep(1 * time.Millisecond)
}
sentCount := mock.PacketCount()
mu.Lock()
@@ -638,9 +688,21 @@ func IntegrationTest_ClientVersionCompatibility(t *testing.T) {
testData := []byte{0x00, 0x01, 0xAA, 0xBB}
s.QueueSend(testData)
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 1 {
break
}
time.Sleep(1 * time.Millisecond)
}
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline = time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 1 {
break
}
time.Sleep(1 * time.Millisecond)
}
sentCount := mock.PacketCount()
if (sentCount > 0) != tt.shouldSucceed {
@@ -674,9 +736,14 @@ func IntegrationTest_PacketPrioritization(t *testing.T) {
s.QueueSend([]byte{0x00, byte(i), 0xDD})
}
time.Sleep(200 * time.Millisecond)
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 10 {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) < 10 {
@@ -732,7 +799,13 @@ func IntegrationTest_DataIntegrityUnderLoad(t *testing.T) {
done:
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= packetCount {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != packetCount {

View File

@@ -133,6 +133,7 @@ type mockCharacterRepo struct {
loadSaveDataData []byte
loadSaveDataNew bool
loadSaveDataName string
loadSaveDataHash []byte
loadSaveDataErr error
// ReadEtcPoints mock fields
@@ -245,7 +246,7 @@ func (m *mockCharacterRepo) SaveBackup(_ uint32, _ int, _ []byte) error {
func (m *mockCharacterRepo) GetLastBackupTime(_ uint32) (time.Time, error) { return time.Time{}, nil }
func (m *mockCharacterRepo) SaveCharacterDataAtomic(_ SaveAtomicParams) error { return nil }
func (m *mockCharacterRepo) LoadSaveDataWithHash(_ uint32) (uint32, []byte, bool, string, []byte, error) {
return m.loadSaveDataID, m.loadSaveDataData, m.loadSaveDataNew, m.loadSaveDataName, nil, m.loadSaveDataErr
return m.loadSaveDataID, m.loadSaveDataData, m.loadSaveDataNew, m.loadSaveDataName, m.loadSaveDataHash, m.loadSaveDataErr
}
func (m *mockCharacterRepo) LoadBackupsByRecency(_ uint32) ([]SavedataBackup, error) {
return []SavedataBackup{}, nil

View File

@@ -155,13 +155,11 @@ func TestMonitored_SaveHandlerInvocationDuringLogout(t *testing.T) {
t.Log("Calling handleMsgMhfSavedata during session")
handleMsgMhfSavedata(session, savePkt)
monitor.RecordSavedata()
time.Sleep(100 * time.Millisecond)
// Now trigger logout
t.Log("Triggering logout - monitoring if save handlers are called")
monitor.RecordLogout()
logoutPlayer(session)
time.Sleep(100 * time.Millisecond)
// Report statistics
t.Log(monitor.GetStats())
@@ -233,12 +231,10 @@ func TestWithLogging_LogoutFlowAnalysis(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(50 * time.Millisecond)
// Trigger logout
t.Log("Triggering logout with logging enabled")
logoutPlayer(session)
time.Sleep(100 * time.Millisecond)
// Analyze logs
allLogs := logs.All()
@@ -317,11 +313,9 @@ func TestConcurrent_MultipleSessionsSaving(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(50 * time.Millisecond)
// Logout
logoutPlayer(session)
time.Sleep(50 * time.Millisecond)
// Verify data saved
var savedCompressed []byte
@@ -376,11 +370,9 @@ func TestSequential_RepeatedLogoutLoginCycles(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session, savePkt)
time.Sleep(50 * time.Millisecond)
// Logout
logoutPlayer(session)
time.Sleep(50 * time.Millisecond)
// Verify data after each cycle
var savedCompressed []byte
@@ -452,7 +444,6 @@ func TestRealtime_SaveDataTimestamps(t *testing.T) {
events = append(events, SaveEvent{time.Now(), "logout_start"})
logoutPlayer(session)
events = append(events, SaveEvent{time.Now(), "logout_end"})
time.Sleep(50 * time.Millisecond)
// Print timeline
t.Log("Save event timeline:")

View File

@@ -84,16 +84,10 @@ func TestSessionLifecycle_BasicSaveLoadCycle(t *testing.T) {
t.Log("Sending savedata packet")
handleMsgMhfSavedata(session1, savePkt)
// Drain ACK
time.Sleep(100 * time.Millisecond)
// Now trigger logout via the actual logout flow
t.Log("Triggering logout via logoutPlayer")
logoutPlayer(session1)
// Give logout time to complete
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Login again and verify data =====
t.Log("--- Starting Session 2: Login and verify data persists ---")
@@ -106,8 +100,6 @@ func TestSessionLifecycle_BasicSaveLoadCycle(t *testing.T) {
}
handleMsgMhfLoaddata(session2, loadPkt)
time.Sleep(50 * time.Millisecond)
// Verify savedata persisted
var savedCompressed []byte
err = db.QueryRow("SELECT savedata FROM characters WHERE id = $1", charID).Scan(&savedCompressed)
@@ -189,7 +181,6 @@ func TestSessionLifecycle_WarehouseDataPersistence(t *testing.T) {
// Logout
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify warehouse contents =====
session2 := createTestSessionForServerWithChar(server, charID, "WarehouseChar")
@@ -240,7 +231,6 @@ func TestSessionLifecycle_KoryoPointsPersistence(t *testing.T) {
t.Logf("Adding %d Koryo points", addPoints)
handleMsgMhfAddKouryouPoint(session1, pkt)
time.Sleep(50 * time.Millisecond)
// Verify points were added in session 1
var points1 uint32
@@ -252,7 +242,6 @@ func TestSessionLifecycle_KoryoPointsPersistence(t *testing.T) {
// Logout
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify Koryo points persist =====
session2 := createTestSessionForServerWithChar(server, charID, "KoryoChar")
@@ -341,14 +330,10 @@ func TestSessionLifecycle_MultipleDataTypesPersistence(t *testing.T) {
}
handleMsgMhfSavedata(session1, savePkt)
// Give handlers time to process
time.Sleep(100 * time.Millisecond)
t.Log("Modified all data types in session 1")
// Logout
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify all data persists =====
session2 := createTestSessionForServerWithChar(server, charID, "MultiChar")
@@ -358,7 +343,6 @@ func TestSessionLifecycle_MultipleDataTypesPersistence(t *testing.T) {
AckHandle: 5001,
}
handleMsgMhfLoaddata(session2, loadPkt)
time.Sleep(50 * time.Millisecond)
allPassed := true
@@ -472,13 +456,11 @@ func TestSessionLifecycle_DisconnectWithoutLogout(t *testing.T) {
RawDataPayload: compressed,
}
handleMsgMhfSavedata(session1, savePkt)
time.Sleep(100 * time.Millisecond)
// Simulate disconnect by calling logoutPlayer (which is called by recvLoop on EOF)
// In real scenario, this is triggered by connection close
t.Log("Simulating ungraceful disconnect")
logoutPlayer(session1)
time.Sleep(100 * time.Millisecond)
// ===== SESSION 2: Verify data saved despite ungraceful disconnect =====
session2 := createTestSessionForServerWithChar(server, charID, "DisconnectChar")
@@ -544,7 +526,6 @@ func TestSessionLifecycle_RapidReconnect(t *testing.T) {
// Logout quickly
logoutPlayer(session)
time.Sleep(30 * time.Millisecond)
// Verify points persisted
var loadedPoints uint32

View File

@@ -279,13 +279,26 @@ func TestBroadcastMHFAllSessions(t *testing.T) {
testPkt := &mhfpacket.MsgSysNop{}
server.BroadcastMHF(testPkt, nil)
time.Sleep(100 * time.Millisecond)
// Poll until all sessions have received the packet or the deadline is reached.
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
receivedCount := 0
for _, sess := range server.sessions {
mock := sess.cryptConn.(*MockCryptConn)
if mock.PacketCount() > 0 {
receivedCount++
}
}
if receivedCount == sessionCount {
break
}
time.Sleep(1 * time.Millisecond)
}
// Stop all sessions
for _, sess := range sessions {
sess.closed.Store(true)
}
time.Sleep(50 * time.Millisecond)
// Verify all sessions received the packet
receivedCount := 0

View File

@@ -116,11 +116,23 @@ func TestPacketQueueIndividualSending(t *testing.T) {
}
// Wait for packets to be processed
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= tt.wantPackets {
break
}
time.Sleep(1 * time.Millisecond)
}
// Stop the session
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline = time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= tt.wantPackets {
break
}
time.Sleep(1 * time.Millisecond)
}
// Verify packet count
sentPackets := mock.GetSentPackets()
@@ -165,9 +177,21 @@ func TestPacketQueueNoConcatenation(t *testing.T) {
s.sendPackets <- packet{packet2, true}
s.sendPackets <- packet{packet3, true}
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 3 {
break
}
time.Sleep(1 * time.Millisecond)
}
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline = time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 3 {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
@@ -204,7 +228,7 @@ func TestQueueSendUsesQueue(t *testing.T) {
s.QueueSend(testData)
// Give it a moment
time.Sleep(10 * time.Millisecond)
time.Sleep(1 * time.Millisecond)
// WITHOUT sendLoop running, packets should NOT be sent yet
if mock.PacketCount() > 0 {
@@ -218,7 +242,13 @@ func TestQueueSendUsesQueue(t *testing.T) {
// Now start sendLoop and verify it gets sent
go s.sendLoop()
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 1 {
break
}
time.Sleep(1 * time.Millisecond)
}
if mock.PacketCount() != 1 {
t.Errorf("expected 1 packet sent after sendLoop, got %d", mock.PacketCount())
@@ -237,9 +267,21 @@ func TestPacketTerminatorFormat(t *testing.T) {
testData := []byte{0x00, 0x01, 0xAA, 0xBB}
s.sendPackets <- packet{testData, true}
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 1 {
break
}
time.Sleep(1 * time.Millisecond)
}
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline = time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 1 {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != 1 {
@@ -313,9 +355,21 @@ func TestPacketQueueAckFormat(t *testing.T) {
ackData := []byte{0xAA, 0xBB, 0xCC, 0xDD}
s.QueueAck(ackHandle, ackData)
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 1 {
break
}
time.Sleep(1 * time.Millisecond)
}
s.closed.Store(true)
time.Sleep(50 * time.Millisecond)
deadline = time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) {
if mock.PacketCount() >= 1 {
break
}
time.Sleep(1 * time.Millisecond)
}
sentPackets := mock.GetSentPackets()
if len(sentPackets) != 1 {