mirror of
https://github.com/Mezeporta/Erupe.git
synced 2026-03-25 00:54:05 +01:00
feat(savedata): add tier 1 data integrity protections
Prevent savedata corruption and denial-of-service by adding four layers of protection to the save pipeline:

- Bounded decompression (nullcomp.DecompressWithLimit): caps output size to prevent OOM from crafted payloads that expand to exhaust memory
- Bounds-checked delta patching (deltacomp.ApplyDataDiffWithLimit): validates offsets before writing, returns errors for negative offsets, truncated patches, and oversized output; ApplyDataDiff now returns original data on error instead of partial corruption
- Size limits on save handlers: rejects compressed payloads >512KB and decompressed data >1MB before processing; applied to main savedata, platedata, and platebox diff paths
- Rotating savedata backups: 3 slots per character with 30-minute interval, snapshots the previous state before overwriting, backed by new savedata_backups table (migration 0007)
This commit is contained in:
@@ -652,6 +652,73 @@ func TestConcurrentSaveData_Integration(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Tier 1 protection tests
|
||||
// =============================================================================
|
||||
|
||||
func TestSaveDataSizeLimit(t *testing.T) {
|
||||
// Verify the size constants are sensible
|
||||
if saveDataMaxCompressedPayload <= 0 {
|
||||
t.Error("saveDataMaxCompressedPayload must be positive")
|
||||
}
|
||||
if saveDataMaxDecompressedPayload <= 0 {
|
||||
t.Error("saveDataMaxDecompressedPayload must be positive")
|
||||
}
|
||||
if saveDataMaxCompressedPayload > saveDataMaxDecompressedPayload {
|
||||
t.Error("compressed limit should not exceed decompressed limit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveDataSizeLimitRejectsOversized(t *testing.T) {
|
||||
server := createMockServer()
|
||||
session := createMockSession(1, server)
|
||||
|
||||
// Create a payload larger than the limit
|
||||
oversized := make([]byte, saveDataMaxCompressedPayload+1)
|
||||
pkt := &mhfpacket.MsgMhfSavedata{
|
||||
SaveType: 0,
|
||||
AckHandle: 1234,
|
||||
AllocMemSize: uint32(len(oversized)),
|
||||
DataSize: uint32(len(oversized)),
|
||||
RawDataPayload: oversized,
|
||||
}
|
||||
|
||||
// This should return early with a fail ACK, not panic
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
t.Errorf("handleMsgMhfSavedata panicked on oversized payload: %v", r)
|
||||
}
|
||||
}()
|
||||
handleMsgMhfSavedata(session, pkt)
|
||||
}
|
||||
|
||||
func TestSaveDataSizeLimitAcceptsNormalPayload(t *testing.T) {
|
||||
// Verify a normal-sized payload passes the size check
|
||||
normalSize := 100000 // 100KB - typical save
|
||||
if normalSize > saveDataMaxCompressedPayload {
|
||||
t.Errorf("normal save size %d exceeds limit %d", normalSize, saveDataMaxCompressedPayload)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecompressWithLimitConstants(t *testing.T) {
|
||||
// Verify limits are consistent with known save sizes
|
||||
// ZZ save is ~147KB decompressed; limit should be well above that
|
||||
zzSaveSize := 150000
|
||||
if saveDataMaxDecompressedPayload < zzSaveSize*2 {
|
||||
t.Errorf("decompressed limit %d is too close to known ZZ save size %d",
|
||||
saveDataMaxDecompressedPayload, zzSaveSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupConstants(t *testing.T) {
|
||||
if saveBackupSlots <= 0 {
|
||||
t.Error("saveBackupSlots must be positive")
|
||||
}
|
||||
if saveBackupInterval <= 0 {
|
||||
t.Error("saveBackupInterval must be positive")
|
||||
}
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Tests consolidated from handlers_coverage4_test.go
|
||||
// =============================================================================
|
||||
|
||||
Reference in New Issue
Block a user