feat(savedata): add tier 1 data integrity protections

Prevent savedata corruption and denial-of-service by adding four layers
of protection to the save pipeline:

- Bounded decompression (nullcomp.DecompressWithLimit): caps output size
  to prevent OOM from crafted payloads that expand to exhaust memory
- Bounds-checked delta patching (deltacomp.ApplyDataDiffWithLimit):
  validates offsets before writing, returns errors for negative offsets,
  truncated patches, and oversized output; ApplyDataDiff now returns
  original data on error instead of partial corruption
- Size limits on save handlers: rejects compressed payloads >512KB and
  decompressed data >1MB before processing; applied to main savedata,
  platedata, and platebox diff paths
- Rotating savedata backups: 3 slots per character with 30-minute
  interval, snapshots the previous state before overwriting, backed by
  new savedata_backups table (migration 0007)
This commit is contained in:
Houmgaor
2026-03-17 19:03:43 +01:00
parent 5009a37d19
commit b40217c7fe
13 changed files with 478 additions and 28 deletions

View File

@@ -2,6 +2,7 @@ package deltacomp
import (
"bytes"
"fmt"
"io"
"go.uber.org/zap"
@@ -49,8 +50,21 @@ func readCount(r *bytes.Reader) (int, error) {
// ApplyDataDiff applies a delta data diff patch onto given base data.
// On any patch error it returns an untouched copy of baseData so callers
// never observe a partially applied (corrupted) result.
func ApplyDataDiff(diff []byte, baseData []byte) []byte {
	// Delegate to the bounds-checked implementation with no output limit.
	patched, err := ApplyDataDiffWithLimit(diff, baseData, 0)
	if err == nil {
		return patched
	}
	zap.L().Error("ApplyDataDiff failed", zap.Error(err))
	// Return original data on error to avoid corruption
	fallback := make([]byte, len(baseData))
	copy(fallback, baseData)
	return fallback
}
// ApplyDataDiffWithLimit applies a delta data diff patch onto given base data.
// If maxOutput > 0, the result is capped at that size; exceeding it returns an error.
// If maxOutput == 0, no limit is enforced (backwards-compatible behavior).
func ApplyDataDiffWithLimit(diff []byte, baseData []byte, maxOutput int) ([]byte, error) {
baseCopy := make([]byte, len(baseData))
copy(baseCopy, baseData)
@@ -76,32 +90,35 @@ func ApplyDataDiff(diff []byte, baseData []byte) []byte {
}
differentCount--
// Grow slice if it's required
if len(baseCopy) < dataOffset {
zap.L().Warn("Slice smaller than data offset, growing slice")
baseCopy = append(baseCopy, make([]byte, (dataOffset+differentCount)-len(baseData))...)
} else {
length := len(baseCopy[dataOffset:])
if length < differentCount {
length -= differentCount
baseCopy = append(baseCopy, make([]byte, length)...)
}
if dataOffset < 0 {
return nil, fmt.Errorf("negative data offset %d", dataOffset)
}
if differentCount < 0 {
return nil, fmt.Errorf("negative different count %d at offset %d", differentCount, dataOffset)
}
endOffset := dataOffset + differentCount
if maxOutput > 0 && endOffset > maxOutput {
return nil, fmt.Errorf("patch writes to offset %d, exceeds limit %d", endOffset, maxOutput)
}
// Grow slice if required
if endOffset > len(baseCopy) {
baseCopy = append(baseCopy, make([]byte, endOffset-len(baseCopy))...)
}
// Apply the patch bytes.
for i := 0; i < differentCount; i++ {
b, err := checkReadUint8(patch)
if err != nil {
zap.L().Error("Invalid or misunderstood patch format", zap.Int("dataOffset", dataOffset))
return baseCopy
return nil, fmt.Errorf("truncated patch at offset %d+%d: %w", dataOffset, i, err)
}
baseCopy[dataOffset+i] = b
}
dataOffset += differentCount - 1
}
return baseCopy
return baseCopy, nil
}

View File

@@ -75,6 +75,83 @@ func readTestDataFile(filename string) []byte {
return data
}
func TestApplyDataDiffWithLimit_BoundsCheck(t *testing.T) {
	// 10-byte base buffer.
	baseData := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A}
	// Patch writing 5 bytes starting at offset 8, so output grows to 13
	// bytes — past the end of the base.
	// Encoding: matchCount=9 (offset -1+9=8), differentCount=6 (6-1=5 bytes).
	diff := []byte{
		0x09,                         // matchCount
		0x06,                         // differentCount
		0xAA, 0xBB, 0xCC, 0xDD, 0xEE, // 5 patch bytes
	}
	cases := []struct {
		name    string
		limit   int // 0 = unlimited (backwards-compatible)
		wantErr bool
	}{
		{name: "within_limit", limit: 20, wantErr: false},
		{name: "exceeds_limit", limit: 10, wantErr: true},
		{name: "no_limit", limit: 0, wantErr: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := ApplyDataDiffWithLimit(diff, baseData, tc.limit)
			if tc.wantErr {
				if err == nil {
					t.Error("expected error for write past limit, got none")
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if len(result) < 13 {
				t.Errorf("expected result length >= 13, got %d", len(result))
			}
		})
	}
}
func TestApplyDataDiffWithLimit_TruncatedPatch(t *testing.T) {
	baseData := []byte{0x01, 0x02, 0x03, 0x04}
	// The patch announces 3 replacement bytes (differentCount 4-1=3) at
	// offset 1 (matchCount 2), but the stream ends after a single byte.
	diff := []byte{0x02, 0x04, 0xAA}
	if _, err := ApplyDataDiffWithLimit(diff, baseData, 100); err == nil {
		t.Error("expected error for truncated patch, got none")
	}
}
func TestApplyDataDiff_ReturnsOriginalOnError(t *testing.T) {
	baseData := []byte{0x01, 0x02, 0x03, 0x04}
	// Truncated patch: announces 3 replacement bytes, supplies only 1.
	diff := []byte{0x02, 0x04, 0xAA}
	got := ApplyDataDiff(diff, baseData)
	// The wrapper must hand back the base data untouched when the patch
	// cannot be applied.
	if !bytes.Equal(got, baseData) {
		t.Errorf("expected original data on error, got %v", got)
	}
}
func TestDeltaPatch(t *testing.T) {
for k, tt := range tests {
testname := fmt.Sprintf("delta_patch_test_%d", k)

View File

@@ -2,6 +2,7 @@ package nullcomp
import (
"bytes"
"fmt"
"io"
)
@@ -49,6 +50,61 @@ func Decompress(compData []byte) ([]byte, error) {
return output, nil
}
// DecompressWithLimit decompresses null-compressed data, returning an error if
// the decompressed output would exceed maxOutput bytes. This prevents
// denial-of-service via crafted payloads that expand to exhaust memory.
//
// Input that does not carry the 16-byte "cmp" magic header — including input
// shorter than 16 bytes — is treated as uncompressed and returned as-is,
// still subject to maxOutput.
func DecompressWithLimit(compData []byte, maxOutput int) ([]byte, error) {
	r := bytes.NewReader(compData)
	header := make([]byte, 16)
	// io.ReadFull guarantees either a complete header or a non-nil error.
	// A plain r.Read could return a short count with a nil error, which the
	// previous code turned into a silent (nil, nil) result.
	if _, err := io.ReadFull(r, header); err != nil {
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			// Too short to contain the cmp header: pass through uncompressed.
			if len(compData) > maxOutput {
				return nil, fmt.Errorf("uncompressed data size %d exceeds limit %d", len(compData), maxOutput)
			}
			return compData, nil
		}
		return nil, err
	}
	// Just return the data if it doesn't contain the cmp header.
	if !bytes.Equal(header, []byte("cmp\x2020110113\x20\x20\x20\x00")) {
		if len(compData) > maxOutput {
			return nil, fmt.Errorf("uncompressed data size %d exceeds limit %d", len(compData), maxOutput)
		}
		return compData, nil
	}
	var output []byte
	for {
		b, err := r.ReadByte()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		if b == 0 {
			// If it's a null byte, then the next byte is how many nulls to add.
			nullCount, err := r.ReadByte()
			if err == io.EOF {
				break
			} else if err != nil {
				return nil, err
			}
			// Check the expansion against the limit BEFORE allocating it.
			if len(output)+int(nullCount) > maxOutput {
				return nil, fmt.Errorf("decompressed size exceeds limit %d", maxOutput)
			}
			output = append(output, make([]byte, int(nullCount))...)
		} else {
			// Non-null bytes are copied through verbatim.
			if len(output)+1 > maxOutput {
				return nil, fmt.Errorf("decompressed size exceeds limit %d", maxOutput)
			}
			output = append(output, b)
		}
	}
	return output, nil
}
// Compress null compresses give given data.
func Compress(rawData []byte) ([]byte, error) {
r := bytes.NewReader(rawData)

View File

@@ -362,6 +362,110 @@ func TestDecompress_EdgeCases(t *testing.T) {
}
}
// === DecompressWithLimit tests ===
func TestDecompressWithLimit_RespectsLimit(t *testing.T) {
	// 1000 bytes of patterned input, round-tripped through Compress.
	input := make([]byte, 1000)
	for i := range input {
		input[i] = byte(i % 256)
	}
	compressed, err := Compress(input)
	if err != nil {
		t.Fatalf("Compress() error = %v", err)
	}
	cases := []struct {
		name      string
		maxOutput int
		wantErr   bool
	}{
		{name: "limit larger than data", maxOutput: 2000, wantErr: false},
		{name: "limit equal to data", maxOutput: 1000, wantErr: false},
		{name: "limit smaller than data", maxOutput: 500, wantErr: true},
		{name: "limit of 1", maxOutput: 1, wantErr: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := DecompressWithLimit(compressed, tc.maxOutput)
			if tc.wantErr {
				if err == nil {
					t.Error("expected error but got none")
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if !bytes.Equal(result, input) {
				t.Error("decompressed data doesn't match original")
			}
		})
	}
}
func TestDecompressWithLimit_NullExpansionBomb(t *testing.T) {
	// Header followed by 1000 (0x00, 0xFF) pairs; each pair expands to 255
	// null bytes, so the full expansion would be 255,000 bytes.
	payload := []byte("cmp\x2020110113\x20\x20\x20\x00")
	for i := 0; i < 1000; i++ {
		payload = append(payload, 0x00, 0xFF)
	}
	if _, err := DecompressWithLimit(payload, 10000); err == nil {
		t.Error("expected error for null expansion bomb, got none")
	}
}
func TestDecompressWithLimit_UncompressedDataExceedsLimit(t *testing.T) {
	// 100 header-less bytes checked against a 50-byte limit: the
	// passthrough path must still enforce the limit.
	data := make([]byte, 100)
	for i := range data {
		data[i] = byte(i + 1) // non-zero so it's not confused with compressed
	}
	if _, err := DecompressWithLimit(data, 50); err == nil {
		t.Error("expected error for uncompressed data exceeding limit")
	}
}
func TestDecompressWithLimit_RoundTrip(t *testing.T) {
	// Mixed nulls and text exercise both branches of the decompressor.
	input := []byte("Hello\x00\x00\x00World\x00\x00End")
	compressed, err := Compress(input)
	if err != nil {
		t.Fatalf("Compress() error = %v", err)
	}
	got, err := DecompressWithLimit(compressed, 1024)
	if err != nil {
		t.Fatalf("DecompressWithLimit() error = %v", err)
	}
	if !bytes.Equal(got, input) {
		t.Errorf("round trip failed: got %v, want %v", got, input)
	}
}
func BenchmarkCompress(b *testing.B) {
data := make([]byte, 10000)
// Fill with some pattern (half nulls, half data)

View File

@@ -4,6 +4,7 @@ import (
"database/sql"
"errors"
"fmt"
"time"
cfg "erupe-ce/config"
"erupe-ce/network/mhfpacket"
@@ -11,6 +12,12 @@ import (
"go.uber.org/zap"
)
// Backup configuration constants for the rotating savedata snapshots.
// Storage per character is bounded to saveBackupSlots blobs.
const (
	saveBackupSlots    = 3                // number of rotating backup slots per character
	saveBackupInterval = 30 * time.Minute // minimum time between backups
)
// GetCharacterSaveData loads a character's save data from the database.
func GetCharacterSaveData(s *Session, charID uint32) (*CharacterSaveData, error) {
id, savedata, isNew, name, err := s.server.charRepo.LoadSaveData(charID)
@@ -55,6 +62,10 @@ func (save *CharacterSaveData) Save(s *Session) error {
return errors.New("no decompressed save data")
}
// Capture the previous compressed savedata before it's overwritten by
// Compress(). This is what gets backed up — the last known-good state.
prevCompSave := save.compSave
if !s.kqfOverride {
s.kqf = save.KQF
} else {
@@ -74,6 +85,14 @@ func (save *CharacterSaveData) Save(s *Session) error {
save.compSave = save.decompSave
}
// Time-gated rotating backup: snapshot the previous compressed savedata
// before overwriting, but only if enough time has elapsed since the last
// backup. This keeps storage bounded (3 slots × blob size per character)
// while providing recovery points.
if len(prevCompSave) > 0 {
maybeSaveBackup(s, save.CharID, prevCompSave)
}
if err := s.server.charRepo.SaveCharacterData(save.CharID, save.compSave, save.HR, save.GR, save.Gender, save.WeaponType, save.WeaponID); err != nil {
s.logger.Error("Failed to update savedata", zap.Error(err), zap.Uint32("charID", save.CharID))
return fmt.Errorf("save character data: %w", err)
@@ -87,6 +106,37 @@ func (save *CharacterSaveData) Save(s *Session) error {
return nil
}
// maybeSaveBackup checks whether enough time has elapsed since the last backup
// and, if so, writes the given compressed savedata into the next rotating slot.
// Errors are logged but do not block the save — backups are best-effort.
func maybeSaveBackup(s *Session, charID uint32, compSave []byte) {
	lastBackup, err := s.server.charRepo.GetLastBackupTime(charID)
	if err != nil {
		s.logger.Warn("Failed to query last backup time, skipping backup",
			zap.Error(err), zap.Uint32("charID", charID))
		return
	}
	// For a character with no backups, lastBackup is the zero time, so
	// time.Since is enormous and the first backup is always taken.
	if time.Since(lastBackup) < saveBackupInterval {
		return
	}
	// Rotate through slots 0..saveBackupSlots-1 by bucketing the current
	// time by the backup interval. Deriving the slot from time.Now() rather
	// than lastBackup is deliberate: the zero time's negative Unix() value
	// combined with Go's % operator would yield a NEGATIVE slot for the very
	// first backup. Successive backups land in successive slots, so the
	// oldest snapshot is the one overwritten.
	slot := int(time.Now().Unix()/int64(saveBackupInterval.Seconds())) % saveBackupSlots
	if err := s.server.charRepo.SaveBackup(charID, slot, compSave); err != nil {
		s.logger.Warn("Failed to save backup",
			zap.Error(err), zap.Uint32("charID", charID), zap.Int("slot", slot))
		return
	}
	s.logger.Info("Savedata backup created",
		zap.Uint32("charID", charID), zap.Int("slot", slot))
}
func handleMsgMhfSexChanger(s *Session, p mhfpacket.MHFPacket) {
pkt := p.(*mhfpacket.MsgMhfSexChanger)
doAckSimpleSucceed(s, pkt.AckHandle, make([]byte, 4))

View File

@@ -17,8 +17,27 @@ import (
"go.uber.org/zap"
)
// Save data size limits.
// The largest known decompressed savedata is ZZ at ~147KB. We use generous
// ceilings to accommodate unknown versions while still catching runaway data.
// Payloads over these limits are rejected before any processing.
const (
	saveDataMaxCompressedPayload   = 524288  // 512KB max compressed payload from client
	saveDataMaxDecompressedPayload = 1048576 // 1MB max decompressed savedata
)
func handleMsgMhfSavedata(s *Session, p mhfpacket.MHFPacket) {
pkt := p.(*mhfpacket.MsgMhfSavedata)
if len(pkt.RawDataPayload) > saveDataMaxCompressedPayload {
s.logger.Warn("Savedata payload exceeds size limit",
zap.Int("len", len(pkt.RawDataPayload)),
zap.Int("max", saveDataMaxCompressedPayload),
zap.Uint32("charID", s.charID),
)
doAckSimpleFail(s, pkt.AckHandle, make([]byte, 4))
return
}
characterSaveData, err := GetCharacterSaveData(s, s.charID)
if err != nil {
s.logger.Error("failed to retrieve character save data from db", zap.Error(err), zap.Uint32("charID", s.charID))
@@ -34,19 +53,25 @@ func handleMsgMhfSavedata(s *Session, p mhfpacket.MHFPacket) {
if pkt.SaveType == 1 {
// Diff-based update.
// diffs themselves are also potentially compressed
diff, err := nullcomp.Decompress(pkt.RawDataPayload)
diff, err := nullcomp.DecompressWithLimit(pkt.RawDataPayload, saveDataMaxDecompressedPayload)
if err != nil {
s.logger.Error("Failed to decompress diff", zap.Error(err))
doAckSimpleFail(s, pkt.AckHandle, make([]byte, 4))
return
}
// Perform diff.
// Perform diff with bounds checking.
s.logger.Info("Diffing...")
characterSaveData.decompSave = deltacomp.ApplyDataDiff(diff, characterSaveData.decompSave)
patched, err := deltacomp.ApplyDataDiffWithLimit(diff, characterSaveData.decompSave, saveDataMaxDecompressedPayload)
if err != nil {
s.logger.Error("Failed to apply save diff", zap.Error(err), zap.Uint32("charID", s.charID))
doAckSimpleFail(s, pkt.AckHandle, make([]byte, 4))
return
}
characterSaveData.decompSave = patched
} else {
dumpSaveData(s, pkt.RawDataPayload, "savedata")
// Regular blob update.
saveData, err := nullcomp.Decompress(pkt.RawDataPayload)
saveData, err := nullcomp.DecompressWithLimit(pkt.RawDataPayload, saveDataMaxDecompressedPayload)
if err != nil {
s.logger.Error("Failed to decompress savedata from packet", zap.Error(err))
doAckSimpleFail(s, pkt.AckHandle, make([]byte, 4))
@@ -193,7 +218,7 @@ func handleMsgMhfLoaddata(s *Session, p mhfpacket.MHFPacket) {
}
doAckBufSucceed(s, pkt.AckHandle, data)
decompSaveData, err := nullcomp.Decompress(data)
decompSaveData, err := nullcomp.DecompressWithLimit(data, saveDataMaxDecompressedPayload)
if err != nil {
s.logger.Error("Failed to decompress savedata", zap.Error(err))
}

View File

@@ -652,6 +652,73 @@ func TestConcurrentSaveData_Integration(t *testing.T) {
}
}
// =============================================================================
// Tier 1 protection tests
// =============================================================================
func TestSaveDataSizeLimit(t *testing.T) {
	// Sanity-check that the configured savedata size ceilings are coherent.
	checks := []struct {
		ok  bool
		msg string
	}{
		{saveDataMaxCompressedPayload > 0, "saveDataMaxCompressedPayload must be positive"},
		{saveDataMaxDecompressedPayload > 0, "saveDataMaxDecompressedPayload must be positive"},
		{saveDataMaxCompressedPayload <= saveDataMaxDecompressedPayload, "compressed limit should not exceed decompressed limit"},
	}
	for _, c := range checks {
		if !c.ok {
			t.Error(c.msg)
		}
	}
}
func TestSaveDataSizeLimitRejectsOversized(t *testing.T) {
server := createMockServer()
session := createMockSession(1, server)
// Create a payload larger than the limit
oversized := make([]byte, saveDataMaxCompressedPayload+1)
pkt := &mhfpacket.MsgMhfSavedata{
SaveType: 0,
AckHandle: 1234,
AllocMemSize: uint32(len(oversized)),
DataSize: uint32(len(oversized)),
RawDataPayload: oversized,
}
// This should return early with a fail ACK, not panic
defer func() {
if r := recover(); r != nil {
t.Errorf("handleMsgMhfSavedata panicked on oversized payload: %v", r)
}
}()
handleMsgMhfSavedata(session, pkt)
}
func TestSaveDataSizeLimitAcceptsNormalPayload(t *testing.T) {
	// A typical ~100KB save must sit comfortably under the compressed limit.
	const normalSize = 100000
	if normalSize > saveDataMaxCompressedPayload {
		t.Errorf("normal save size %d exceeds limit %d", normalSize, saveDataMaxCompressedPayload)
	}
}
func TestDecompressWithLimitConstants(t *testing.T) {
	// The decompressed ceiling must leave ample (2x) headroom over the
	// largest known save: ZZ at ~147KB decompressed.
	const zzSaveSize = 150000
	if saveDataMaxDecompressedPayload < zzSaveSize*2 {
		t.Errorf("decompressed limit %d is too close to known ZZ save size %d",
			saveDataMaxDecompressedPayload, zzSaveSize)
	}
}
func TestBackupConstants(t *testing.T) {
	// Both backup tuning knobs must be strictly positive for the rotation
	// logic to make sense.
	if saveBackupSlots < 1 {
		t.Error("saveBackupSlots must be positive")
	}
	if saveBackupInterval < 1 {
		t.Error("saveBackupInterval must be positive")
	}
}
// =============================================================================
// Tests consolidated from handlers_coverage4_test.go
// =============================================================================

View File

@@ -77,7 +77,7 @@ func handleMsgMhfSavePlateData(s *Session, p mhfpacket.MHFPacket) {
if len(data) > 0 {
// Decompress
s.logger.Debug("Decompressing PlateData", zap.Int("compressed_size", len(data)))
data, err = nullcomp.Decompress(data)
data, err = nullcomp.DecompressWithLimit(data, plateDataMaxPayload)
if err != nil {
s.logger.Error("Failed to decompress platedata",
zap.Error(err),
@@ -91,9 +91,18 @@ func handleMsgMhfSavePlateData(s *Session, p mhfpacket.MHFPacket) {
data = make([]byte, plateDataEmptySize)
}
// Perform diff and compress it to write back to db
// Perform diff with bounds checking and compress it to write back to db
s.logger.Debug("Applying PlateData diff", zap.Int("base_size", len(data)))
saveOutput, err := nullcomp.Compress(deltacomp.ApplyDataDiff(pkt.RawDataPayload, data))
patched, err := deltacomp.ApplyDataDiffWithLimit(pkt.RawDataPayload, data, plateDataMaxPayload)
if err != nil {
s.logger.Error("Failed to apply platedata diff",
zap.Error(err),
zap.Uint32("charID", s.charID),
)
doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
return
}
saveOutput, err := nullcomp.Compress(patched)
if err != nil {
s.logger.Error("Failed to diff and compress platedata",
zap.Error(err),
@@ -173,7 +182,7 @@ func handleMsgMhfSavePlateBox(s *Session, p mhfpacket.MHFPacket) {
if len(data) > 0 {
// Decompress
s.logger.Info("Decompressing...")
data, err = nullcomp.Decompress(data)
data, err = nullcomp.DecompressWithLimit(data, plateBoxMaxPayload)
if err != nil {
s.logger.Error("Failed to decompress platebox", zap.Error(err))
doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
@@ -184,9 +193,15 @@ func handleMsgMhfSavePlateBox(s *Session, p mhfpacket.MHFPacket) {
data = make([]byte, plateBoxEmptySize)
}
// Perform diff and compress it to write back to db
// Perform diff with bounds checking and compress it to write back to db
s.logger.Info("Diffing...")
saveOutput, err := nullcomp.Compress(deltacomp.ApplyDataDiff(pkt.RawDataPayload, data))
patched, err := deltacomp.ApplyDataDiffWithLimit(pkt.RawDataPayload, data, plateBoxMaxPayload)
if err != nil {
s.logger.Error("Failed to apply platebox diff", zap.Error(err))
doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
return
}
saveOutput, err := nullcomp.Compress(patched)
if err != nil {
s.logger.Error("Failed to diff and compress platebox", zap.Error(err))
doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})

View File

@@ -134,7 +134,7 @@ func (save *CharacterSaveData) Compress() error {
func (save *CharacterSaveData) Decompress() error {
var err error
save.decompSave, err = nullcomp.Decompress(save.compSave)
save.decompSave, err = nullcomp.DecompressWithLimit(save.compSave, saveDataMaxDecompressedPayload)
if err != nil {
return err
}

View File

@@ -212,6 +212,32 @@ func (r *CharacterRepository) UpdateGCPAndPact(charID uint32, gcp uint32, pactID
return err
}
// SaveBackup upserts a savedata snapshot into the rotating backup table,
// overwriting whatever the slot previously held and refreshing its timestamp.
func (r *CharacterRepository) SaveBackup(charID uint32, slot int, data []byte) error {
	const query = `
		INSERT INTO savedata_backups (char_id, slot, savedata, saved_at)
		VALUES ($1, $2, $3, now())
		ON CONFLICT (char_id, slot) DO UPDATE SET savedata = $3, saved_at = now()
	`
	_, err := r.db.Exec(query, charID, slot, data)
	return err
}
// GetLastBackupTime returns the most recent backup timestamp for a character.
// Returns the zero time if no backups exist.
func (r *CharacterRepository) GetLastBackupTime(charID uint32) (time.Time, error) {
	var latest sql.NullTime
	row := r.db.QueryRow(
		"SELECT MAX(saved_at) FROM savedata_backups WHERE char_id = $1", charID,
	)
	if err := row.Scan(&latest); err != nil {
		return time.Time{}, err
	}
	if latest.Valid {
		return latest.Time, nil
	}
	// MAX over zero rows is NULL: the character has never been backed up.
	return time.Time{}, nil
}
// FindByRastaID looks up name and id by rasta_id.
func (r *CharacterRepository) FindByRastaID(rastaID int) (charID uint32, name string, err error) {
err = r.db.QueryRow("SELECT name, id FROM characters WHERE rasta_id=$1", rastaID).Scan(&name, &charID)

View File

@@ -39,6 +39,8 @@ type CharacterRepo interface {
SaveCharacterData(charID uint32, compSave []byte, hr, gr uint16, isFemale bool, weaponType uint8, weaponID uint16) error
SaveHouseData(charID uint32, houseTier []byte, houseData, bookshelf, gallery, tore, garden []byte) error
LoadSaveData(charID uint32) (uint32, []byte, bool, string, error)
SaveBackup(charID uint32, slot int, data []byte) error
GetLastBackupTime(charID uint32) (time.Time, error)
}
// GuildRepo defines the contract for guild data access.

View File

@@ -228,6 +228,8 @@ func (m *mockCharacterRepo) SaveHouseData(_ uint32, _ []byte, _, _, _, _, _ []by
// LoadSaveData returns the canned savedata fields configured on the mock.
func (m *mockCharacterRepo) LoadSaveData(_ uint32) (uint32, []byte, bool, string, error) {
	return m.loadSaveDataID,
		m.loadSaveDataData,
		m.loadSaveDataNew,
		m.loadSaveDataName,
		m.loadSaveDataErr
}
// SaveBackup is a no-op on the mock; backup writes always "succeed".
func (m *mockCharacterRepo) SaveBackup(_ uint32, _ int, _ []byte) error {
	return nil
}
// GetLastBackupTime reports no prior backups (zero time) on the mock.
func (m *mockCharacterRepo) GetLastBackupTime(_ uint32) (time.Time, error) {
	return time.Time{}, nil
}
// --- mockGoocooRepo ---