mirror of
https://github.com/Mezeporta/Erupe.git
synced 2026-03-22 07:32:32 +01:00
feat(savedata): add tier 1 data integrity protections
Prevent savedata corruption and denial-of-service by adding four layers of protection to the save pipeline: - Bounded decompression (nullcomp.DecompressWithLimit): caps output size to prevent OOM from crafted payloads that expand to exhaust memory - Bounds-checked delta patching (deltacomp.ApplyDataDiffWithLimit): validates offsets before writing, returns errors for negative offsets, truncated patches, and oversized output; ApplyDataDiff now returns original data on error instead of partial corruption - Size limits on save handlers: rejects compressed payloads >512KB and decompressed data >1MB before processing; applied to main savedata, platedata, and platebox diff paths - Rotating savedata backups: 3 slots per character with 30-minute interval, snapshots the previous state before overwriting, backed by new savedata_backups table (migration 0007)
This commit is contained in:
@@ -2,6 +2,7 @@ package deltacomp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"go.uber.org/zap"
|
||||
@@ -49,8 +50,21 @@ func readCount(r *bytes.Reader) (int, error) {
|
||||
|
||||
// ApplyDataDiff applies a delta data diff patch onto given base data.
|
||||
func ApplyDataDiff(diff []byte, baseData []byte) []byte {
|
||||
// Make a copy of the base data to return,
|
||||
// (probably just make this modify the given slice in the future).
|
||||
result, err := ApplyDataDiffWithLimit(diff, baseData, 0)
|
||||
if err != nil {
|
||||
zap.L().Error("ApplyDataDiff failed", zap.Error(err))
|
||||
// Return original data on error to avoid corruption
|
||||
out := make([]byte, len(baseData))
|
||||
copy(out, baseData)
|
||||
return out
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ApplyDataDiffWithLimit applies a delta data diff patch onto given base data.
|
||||
// If maxOutput > 0, the result is capped at that size; exceeding it returns an error.
|
||||
// If maxOutput == 0, no limit is enforced (backwards-compatible behavior).
|
||||
func ApplyDataDiffWithLimit(diff []byte, baseData []byte, maxOutput int) ([]byte, error) {
|
||||
baseCopy := make([]byte, len(baseData))
|
||||
copy(baseCopy, baseData)
|
||||
|
||||
@@ -76,32 +90,35 @@ func ApplyDataDiff(diff []byte, baseData []byte) []byte {
|
||||
}
|
||||
differentCount--
|
||||
|
||||
// Grow slice if it's required
|
||||
if len(baseCopy) < dataOffset {
|
||||
zap.L().Warn("Slice smaller than data offset, growing slice")
|
||||
baseCopy = append(baseCopy, make([]byte, (dataOffset+differentCount)-len(baseData))...)
|
||||
} else {
|
||||
length := len(baseCopy[dataOffset:])
|
||||
if length < differentCount {
|
||||
length -= differentCount
|
||||
baseCopy = append(baseCopy, make([]byte, length)...)
|
||||
}
|
||||
if dataOffset < 0 {
|
||||
return nil, fmt.Errorf("negative data offset %d", dataOffset)
|
||||
}
|
||||
if differentCount < 0 {
|
||||
return nil, fmt.Errorf("negative different count %d at offset %d", differentCount, dataOffset)
|
||||
}
|
||||
|
||||
endOffset := dataOffset + differentCount
|
||||
if maxOutput > 0 && endOffset > maxOutput {
|
||||
return nil, fmt.Errorf("patch writes to offset %d, exceeds limit %d", endOffset, maxOutput)
|
||||
}
|
||||
|
||||
// Grow slice if required
|
||||
if endOffset > len(baseCopy) {
|
||||
baseCopy = append(baseCopy, make([]byte, endOffset-len(baseCopy))...)
|
||||
}
|
||||
|
||||
// Apply the patch bytes.
|
||||
for i := 0; i < differentCount; i++ {
|
||||
b, err := checkReadUint8(patch)
|
||||
if err != nil {
|
||||
zap.L().Error("Invalid or misunderstood patch format", zap.Int("dataOffset", dataOffset))
|
||||
return baseCopy
|
||||
return nil, fmt.Errorf("truncated patch at offset %d+%d: %w", dataOffset, i, err)
|
||||
}
|
||||
|
||||
baseCopy[dataOffset+i] = b
|
||||
}
|
||||
|
||||
dataOffset += differentCount - 1
|
||||
|
||||
}
|
||||
|
||||
return baseCopy
|
||||
return baseCopy, nil
|
||||
}
|
||||
|
||||
@@ -75,6 +75,83 @@ func readTestDataFile(filename string) []byte {
|
||||
return data
|
||||
}
|
||||
|
||||
func TestApplyDataDiffWithLimit_BoundsCheck(t *testing.T) {
|
||||
// Base data: 10 bytes
|
||||
baseData := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A}
|
||||
|
||||
// Build a patch that tries to write at offset 8 with 5 different bytes,
|
||||
// which would extend to offset 13 (beyond 10-byte base).
|
||||
// Format: matchCount=9 (first is +1), differentCount=6 (is -1 = 5 bytes)
|
||||
diff := []byte{
|
||||
0x09, // matchCount (first is +1, so offset becomes -1+9=8)
|
||||
0x06, // differentCount (6-1=5 different bytes)
|
||||
0xAA, 0xBB, 0xCC, 0xDD, 0xEE, // 5 patch bytes
|
||||
}
|
||||
|
||||
t.Run("within_limit", func(t *testing.T) {
|
||||
// Limit of 20 allows the growth
|
||||
result, err := ApplyDataDiffWithLimit(diff, baseData, 20)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(result) < 13 {
|
||||
t.Errorf("expected result length >= 13, got %d", len(result))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("exceeds_limit", func(t *testing.T) {
|
||||
// Limit of 10 doesn't allow writing past the base
|
||||
_, err := ApplyDataDiffWithLimit(diff, baseData, 10)
|
||||
if err == nil {
|
||||
t.Error("expected error for write past limit, got none")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no_limit", func(t *testing.T) {
|
||||
// maxOutput=0 means no limit (backwards compatible)
|
||||
result, err := ApplyDataDiffWithLimit(diff, baseData, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(result) < 13 {
|
||||
t.Errorf("expected result length >= 13, got %d", len(result))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplyDataDiffWithLimit_TruncatedPatch(t *testing.T) {
|
||||
baseData := []byte{0x01, 0x02, 0x03, 0x04}
|
||||
|
||||
// Patch claims 3 different bytes but only provides 1
|
||||
diff := []byte{
|
||||
0x02, // matchCount (offset = -1+2 = 1)
|
||||
0x04, // differentCount (4-1=3 different bytes)
|
||||
0xAA, // only 1 byte provided (missing 2)
|
||||
}
|
||||
|
||||
_, err := ApplyDataDiffWithLimit(diff, baseData, 100)
|
||||
if err == nil {
|
||||
t.Error("expected error for truncated patch, got none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyDataDiff_ReturnsOriginalOnError(t *testing.T) {
|
||||
baseData := []byte{0x01, 0x02, 0x03, 0x04}
|
||||
|
||||
// Truncated patch
|
||||
diff := []byte{
|
||||
0x02,
|
||||
0x04,
|
||||
0xAA, // only 1 of 3 expected bytes
|
||||
}
|
||||
|
||||
result := ApplyDataDiff(diff, baseData)
|
||||
// On error, ApplyDataDiff should return the original data unchanged
|
||||
if !bytes.Equal(result, baseData) {
|
||||
t.Errorf("expected original data on error, got %v", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeltaPatch(t *testing.T) {
|
||||
for k, tt := range tests {
|
||||
testname := fmt.Sprintf("delta_patch_test_%d", k)
|
||||
|
||||
@@ -2,6 +2,7 @@ package nullcomp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
@@ -49,6 +50,61 @@ func Decompress(compData []byte) ([]byte, error) {
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// DecompressWithLimit decompresses null-compressed data, returning an error if
// the decompressed output would exceed maxOutput bytes. This prevents
// denial-of-service via crafted payloads that expand to exhaust memory.
//
// Data that does not carry the 16-byte "cmp 20110113" magic header is treated
// as uncompressed and returned as-is, still subject to maxOutput.
func DecompressWithLimit(compData []byte, maxOutput int) ([]byte, error) {
	// Payloads shorter than the magic header cannot be compressed; treat
	// them as plain data. (Previously a short read of the header could fall
	// through a `n != len(header)` branch whose err was nil, returning
	// (nil, nil) and silently dropping the payload.)
	if len(compData) < 16 {
		if len(compData) > maxOutput {
			return nil, fmt.Errorf("uncompressed data size %d exceeds limit %d", len(compData), maxOutput)
		}
		return compData, nil
	}

	r := bytes.NewReader(compData)

	// io.ReadFull guarantees err != nil whenever fewer than 16 bytes were
	// read, unlike a bare Read which may short-read with a nil error.
	header := make([]byte, 16)
	if _, err := io.ReadFull(r, header); err != nil {
		return nil, err
	}

	// Just return the data if it doesn't contain the cmp header.
	if !bytes.Equal(header, []byte("cmp\x2020110113\x20\x20\x20\x00")) {
		if len(compData) > maxOutput {
			return nil, fmt.Errorf("uncompressed data size %d exceeds limit %d", len(compData), maxOutput)
		}
		return compData, nil
	}

	var output []byte
	for {
		b, err := r.ReadByte()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}

		if b == 0 {
			// If it's a null byte, then the next byte is how many nulls to add.
			nullCount, err := r.ReadByte()
			if err == io.EOF {
				break
			} else if err != nil {
				return nil, err
			}

			// Check the cap BEFORE allocating the run, so a crafted
			// payload cannot force a huge append.
			if len(output)+int(nullCount) > maxOutput {
				return nil, fmt.Errorf("decompressed size exceeds limit %d", maxOutput)
			}
			output = append(output, make([]byte, int(nullCount))...)
		} else {
			// Literal byte: copied through unchanged.
			if len(output)+1 > maxOutput {
				return nil, fmt.Errorf("decompressed size exceeds limit %d", maxOutput)
			}
			output = append(output, b)
		}
	}

	return output, nil
}
|
||||
|
||||
// Compress null compresses give given data.
|
||||
func Compress(rawData []byte) ([]byte, error) {
|
||||
r := bytes.NewReader(rawData)
|
||||
|
||||
@@ -362,6 +362,110 @@ func TestDecompress_EdgeCases(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// === DecompressWithLimit tests ===
|
||||
|
||||
func TestDecompressWithLimit_RespectsLimit(t *testing.T) {
|
||||
// Compress data that decompresses to 1000 bytes
|
||||
input := make([]byte, 1000)
|
||||
for i := range input {
|
||||
input[i] = byte(i % 256)
|
||||
}
|
||||
compressed, err := Compress(input)
|
||||
if err != nil {
|
||||
t.Fatalf("Compress() error = %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
maxOutput int
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "limit larger than data",
|
||||
maxOutput: 2000,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "limit equal to data",
|
||||
maxOutput: 1000,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "limit smaller than data",
|
||||
maxOutput: 500,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "limit of 1",
|
||||
maxOutput: 1,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := DecompressWithLimit(compressed, tt.maxOutput)
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Error("expected error but got none")
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(result, input) {
|
||||
t.Error("decompressed data doesn't match original")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecompressWithLimit_NullExpansionBomb(t *testing.T) {
|
||||
// Craft a payload that would expand to a huge size via null runs:
|
||||
// header + 0x00 0xFF repeated many times
|
||||
var payload []byte
|
||||
payload = append(payload, []byte("cmp\x2020110113\x20\x20\x20\x00")...)
|
||||
for i := 0; i < 1000; i++ {
|
||||
payload = append(payload, 0x00, 0xFF) // each pair = 255 null bytes
|
||||
}
|
||||
// Total decompressed would be 255,000 bytes
|
||||
|
||||
_, err := DecompressWithLimit(payload, 10000)
|
||||
if err == nil {
|
||||
t.Error("expected error for null expansion bomb, got none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecompressWithLimit_UncompressedDataExceedsLimit(t *testing.T) {
|
||||
// Data without cmp header that exceeds the limit
|
||||
data := make([]byte, 100)
|
||||
for i := range data {
|
||||
data[i] = byte(i + 1) // non-zero so it's not confused with compressed
|
||||
}
|
||||
|
||||
_, err := DecompressWithLimit(data, 50)
|
||||
if err == nil {
|
||||
t.Error("expected error for uncompressed data exceeding limit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecompressWithLimit_RoundTrip(t *testing.T) {
|
||||
input := []byte("Hello\x00\x00\x00World\x00\x00End")
|
||||
compressed, err := Compress(input)
|
||||
if err != nil {
|
||||
t.Fatalf("Compress() error = %v", err)
|
||||
}
|
||||
|
||||
result, err := DecompressWithLimit(compressed, 1024)
|
||||
if err != nil {
|
||||
t.Fatalf("DecompressWithLimit() error = %v", err)
|
||||
}
|
||||
if !bytes.Equal(result, input) {
|
||||
t.Errorf("round trip failed: got %v, want %v", result, input)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompress(b *testing.B) {
|
||||
data := make([]byte, 10000)
|
||||
// Fill with some pattern (half nulls, half data)
|
||||
|
||||
Reference in New Issue
Block a user