test(channelserver): add store tests and document lock ordering

Add unit tests with race-detector coverage for QuestCache,
UserBinaryStore, and MinidataStore (18 tests covering hits, misses,
expiry, copy isolation, and concurrent access).

Document the lock acquisition order on the Server struct to prevent
future deadlocks: Server.Mutex → stagesLock → Stage → semaphoreLock.
This commit is contained in:
Houmgaor
2026-02-21 13:56:46 +01:00
parent 1d5026c3a5
commit da1e62d7c6
4 changed files with 247 additions and 0 deletions

View File

@@ -0,0 +1,57 @@
package channelserver
import (
"sync"
"testing"
)
// TestMinidataStore_GetMiss verifies that looking up a character ID that
// was never stored reports a miss.
func TestMinidataStore_GetMiss(t *testing.T) {
	store := NewMinidataStore()
	if _, found := store.Get(1); found {
		t.Error("expected miss for unknown charID")
	}
}
// TestMinidataStore_SetGet verifies that bytes stored under a character ID
// are returned intact by a subsequent Get.
func TestMinidataStore_SetGet(t *testing.T) {
	store := NewMinidataStore()
	payload := []byte{0xAA, 0xBB}
	store.Set(42, payload)
	got, found := store.Get(42)
	if !found {
		t.Fatal("expected hit")
	}
	if len(got) != 2 || got[0] != 0xAA {
		t.Errorf("got %v, want [0xAA 0xBB]", got)
	}
}
// TestMinidataStore_Overwrite verifies that a second Set for the same
// character ID replaces the previously stored data.
func TestMinidataStore_Overwrite(t *testing.T) {
	s := NewMinidataStore()
	s.Set(1, []byte{0x01})
	s.Set(1, []byte{0x02})
	got, ok := s.Get(1)
	if !ok {
		// Guard: indexing got below would panic on a nil slice, turning a
		// test failure into a panic. Fail cleanly instead.
		t.Fatal("expected hit after overwrite")
	}
	if got[0] != 0x02 {
		t.Error("overwrite should replace previous value")
	}
}
// TestMinidataStore_ConcurrentAccess hammers the store with parallel
// writers and readers; run with -race to surface unsynchronized access.
func TestMinidataStore_ConcurrentAccess(t *testing.T) {
	store := NewMinidataStore()
	var wg sync.WaitGroup
	for id := uint32(0); id < 100; id++ {
		wg.Add(2)
		// Pass the ID as an argument so each goroutine gets its own copy.
		go func(charID uint32) {
			defer wg.Done()
			store.Set(charID, []byte{byte(charID)})
		}(id)
		go func(charID uint32) {
			defer wg.Done()
			store.Get(charID)
		}(id)
	}
	wg.Wait()
}

View File

@@ -0,0 +1,78 @@
package channelserver
import (
"sync"
"testing"
"time"
)
// TestQuestCache_GetMiss verifies that an unknown quest ID reports a miss.
func TestQuestCache_GetMiss(t *testing.T) {
	cache := NewQuestCache(60)
	if _, hit := cache.Get(999); hit {
		t.Error("expected cache miss for unknown quest ID")
	}
}
// TestQuestCache_PutGet verifies that cached bytes are returned intact
// while the entry is still fresh.
func TestQuestCache_PutGet(t *testing.T) {
	cache := NewQuestCache(60)
	payload := []byte{0xDE, 0xAD}
	cache.Put(1, payload)
	got, hit := cache.Get(1)
	if !hit {
		t.Fatal("expected cache hit")
	}
	if len(got) != 2 || got[0] != 0xDE || got[1] != 0xAD {
		t.Errorf("got %v, want [0xDE 0xAD]", got)
	}
}
// TestQuestCache_Expiry verifies that a zero TTL disables caching entirely:
// a Put followed immediately by a Get must miss.
func TestQuestCache_Expiry(t *testing.T) {
	cache := NewQuestCache(0) // TTL=0 disables caching
	cache.Put(1, []byte{0x01})
	if _, hit := cache.Get(1); hit {
		t.Error("expected cache miss when TTL is 0")
	}
}
// TestQuestCache_ExpiryElapsed verifies that an entry becomes invisible once
// its TTL has elapsed. The cache is constructed directly (rather than via
// NewQuestCache) so the test can use a sub-second TTL.
func TestQuestCache_ExpiryElapsed(t *testing.T) {
	cache := &QuestCache{
		data:   make(map[int][]byte),
		expiry: make(map[int]time.Time),
		ttl:    50 * time.Millisecond,
	}
	cache.Put(1, []byte{0x01})
	// Entry must be visible immediately after insertion.
	if _, hit := cache.Get(1); !hit {
		t.Fatal("expected cache hit before expiry")
	}
	// After sleeping past the TTL the entry must be gone.
	time.Sleep(60 * time.Millisecond)
	if _, hit := cache.Get(1); hit {
		t.Error("expected cache miss after expiry")
	}
}
// TestQuestCache_ConcurrentAccess hammers the cache with parallel writers
// and readers; run with -race to surface unsynchronized access.
func TestQuestCache_ConcurrentAccess(t *testing.T) {
	cache := NewQuestCache(60)
	var wg sync.WaitGroup
	for questID := 0; questID < 100; questID++ {
		wg.Add(2)
		// Pass the ID as an argument so each goroutine gets its own copy.
		go func(id int) {
			defer wg.Done()
			cache.Put(id, []byte{byte(id)})
		}(questID)
		go func(id int) {
			defer wg.Done()
			cache.Get(id)
		}(questID)
	}
	wg.Wait()
}

View File

@@ -30,6 +30,15 @@ type Config struct {
}
// Server is a MHF channel server.
//
// Lock ordering (acquire in this order to avoid deadlocks):
// 1. Server.Mutex protects sessions map
// 2. Server.stagesLock protects stages map
// 3. Stage.RWMutex protects per-stage state (clients, objects)
// 4. Server.semaphoreLock protects semaphore map
//
// Self-contained stores (userBinary, minidata, questCache) manage their
// own locks internally and may be acquired at any point.
type Server struct {
	sync.Mutex
	Channels []*Server

View File

@@ -0,0 +1,103 @@
package channelserver
import (
"sync"
"testing"
)
// TestUserBinaryStore_GetMiss verifies that an unknown (charID, index)
// key reports a miss.
func TestUserBinaryStore_GetMiss(t *testing.T) {
	store := NewUserBinaryStore()
	if _, found := store.Get(1, 1); found {
		t.Error("expected miss for unknown key")
	}
}
// TestUserBinaryStore_SetGet verifies that bytes stored under a
// (charID, index) key are returned intact by a subsequent Get.
func TestUserBinaryStore_SetGet(t *testing.T) {
	store := NewUserBinaryStore()
	payload := []byte{0x01, 0x02, 0x03}
	store.Set(100, 3, payload)
	got, found := store.Get(100, 3)
	if !found {
		t.Fatal("expected hit")
	}
	if len(got) != 3 || got[0] != 0x01 {
		t.Errorf("got %v, want [1 2 3]", got)
	}
}
// TestUserBinaryStore_DifferentIndexes verifies that two indexes under the
// same character ID store independent data.
func TestUserBinaryStore_DifferentIndexes(t *testing.T) {
	s := NewUserBinaryStore()
	s.Set(1, 1, []byte{0xAA})
	s.Set(1, 2, []byte{0xBB})
	got1, ok1 := s.Get(1, 1)
	got2, ok2 := s.Get(1, 2)
	if !ok1 || !ok2 {
		// Guard: indexing a nil slice below would panic on a miss, turning a
		// test failure into a panic. Fail cleanly instead.
		t.Fatal("expected hits for both indexes")
	}
	if got1[0] != 0xAA || got2[0] != 0xBB {
		t.Error("different indexes should store independent data")
	}
}
// TestUserBinaryStore_Delete verifies that a deleted key no longer hits.
func TestUserBinaryStore_Delete(t *testing.T) {
	store := NewUserBinaryStore()
	store.Set(1, 3, []byte{0x01})
	store.Delete(1, 3)
	if _, found := store.Get(1, 3); found {
		t.Error("expected miss after delete")
	}
}
// TestUserBinaryStore_DeleteNonExistent verifies that deleting a key that
// was never stored is a harmless no-op.
func TestUserBinaryStore_DeleteNonExistent(t *testing.T) {
	store := NewUserBinaryStore()
	store.Delete(999, 1) // should not panic
}
// TestUserBinaryStore_GetCopy verifies that GetCopy returns the stored
// bytes and that the returned slice is isolated from the store's internal
// buffer (mutating it must not be visible through Get).
func TestUserBinaryStore_GetCopy(t *testing.T) {
	s := NewUserBinaryStore()
	s.Set(1, 3, []byte{0x01, 0x02})
	cp := s.GetCopy(1, 3)
	// Length-check before indexing: a short or nil copy would otherwise
	// panic here instead of failing the test.
	if len(cp) != 2 || cp[0] != 0x01 || cp[1] != 0x02 {
		t.Fatal("copy data mismatch")
	}
	// Mutating the copy must not affect the store
	cp[0] = 0xFF
	orig, ok := s.Get(1, 3)
	if !ok {
		// Guard: indexing orig below would panic on a nil slice.
		t.Fatal("expected hit for stored key")
	}
	if orig[0] == 0xFF {
		t.Error("GetCopy returned a reference, not a copy")
	}
}
// TestUserBinaryStore_GetCopyMiss verifies that GetCopy yields nil for a
// key that was never stored.
func TestUserBinaryStore_GetCopyMiss(t *testing.T) {
	store := NewUserBinaryStore()
	if cp := store.GetCopy(999, 1); cp != nil {
		t.Error("expected nil for missing key")
	}
}
// TestUserBinaryStore_ConcurrentAccess hammers the store with parallel
// Set/Get/GetCopy calls; run with -race to surface unsynchronized access.
func TestUserBinaryStore_ConcurrentAccess(t *testing.T) {
	store := NewUserBinaryStore()
	var wg sync.WaitGroup
	for id := uint32(0); id < 100; id++ {
		wg.Add(3)
		// Pass the ID as an argument so each goroutine gets its own copy.
		go func(charID uint32) {
			defer wg.Done()
			store.Set(charID, 1, []byte{byte(charID)})
		}(id)
		go func(charID uint32) {
			defer wg.Done()
			store.Get(charID, 1)
		}(id)
		go func(charID uint32) {
			defer wg.Done()
			store.GetCopy(charID, 1)
		}(id)
	}
	wg.Wait()
}