feat(db): add embedded auto-migrating schema system

Replace 4 independent schema management code paths (Docker shell
script, setup wizard pg_restore, test helpers, manual psql) with a
single migration runner embedded in the server binary.

The new server/migrations/ package uses Go embed to bundle all SQL
schemas. On startup, Migrate() creates a schema_version tracking
table, detects existing databases (auto-marks baseline as applied),
and runs pending migrations in transactions.

Key changes:
- Consolidated init.sql + 9.2-update + 33 patches into 0001_init.sql
- Setup wizard simplified to single "Apply schema" checkbox
- Test helpers use migrations.Migrate() instead of pg_restore
- Docker no longer needs schema volume mounts or init script
- Seed data (shops, events, gacha) embedded and applied via API
- Future migrations just add 0002_*.sql files — no manual steps
This commit is contained in:
Houmgaor
2026-02-23 21:19:21 +01:00
parent 6a7db47723
commit 27fb0faa1e
62 changed files with 4736 additions and 932 deletions

View File

@@ -1,13 +1,14 @@
package setup
import (
"database/sql"
"embed"
"encoding/json"
"fmt"
"net/http"
"path/filepath"
"erupe-ce/server/migrations"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"go.uber.org/zap"
)
@@ -73,16 +74,14 @@ func (ws *wizardServer) handleTestDB(w http.ResponseWriter, r *http.Request) {
// initDBRequest is the JSON body for POST /api/setup/init-db.
type initDBRequest struct {
Host string `json:"host"`
Port int `json:"port"`
User string `json:"user"`
Password string `json:"password"`
DBName string `json:"dbName"`
CreateDB bool `json:"createDB"`
ApplyInit bool `json:"applyInit"`
ApplyUpdate bool `json:"applyUpdate"`
ApplyPatch bool `json:"applyPatch"`
ApplyBundled bool `json:"applyBundled"`
Host string `json:"host"`
Port int `json:"port"`
User string `json:"user"`
Password string `json:"password"`
DBName string `json:"dbName"`
CreateDB bool `json:"createDB"`
ApplySchema bool `json:"applySchema"`
ApplyBundled bool `json:"applyBundled"`
}
func (ws *wizardServer) handleInitDB(w http.ResponseWriter, r *http.Request) {
@@ -108,23 +107,12 @@ func (ws *wizardServer) handleInitDB(w http.ResponseWriter, r *http.Request) {
addLog("Database created successfully")
}
if req.ApplyInit {
addLog("Applying init schema (pg_restore)...")
if err := applyInitSchema(req.Host, req.Port, req.User, req.Password, req.DBName); err != nil {
addLog(fmt.Sprintf("ERROR: %s", err))
writeJSON(w, http.StatusOK, map[string]interface{}{"success": false, "log": log})
return
}
addLog("Init schema applied successfully")
}
// For update/patch/bundled schemas, connect to the target DB.
if req.ApplyUpdate || req.ApplyPatch || req.ApplyBundled {
if req.ApplySchema || req.ApplyBundled {
connStr := fmt.Sprintf(
"host='%s' port='%d' user='%s' password='%s' dbname='%s' sslmode=disable",
req.Host, req.Port, req.User, req.Password, req.DBName,
)
db, err := sql.Open("postgres", connStr)
db, err := sqlx.Open("postgres", connStr)
if err != nil {
addLog(fmt.Sprintf("ERROR connecting to database: %s", err))
writeJSON(w, http.StatusOK, map[string]interface{}{"success": false, "log": log})
@@ -132,37 +120,26 @@ func (ws *wizardServer) handleInitDB(w http.ResponseWriter, r *http.Request) {
}
defer func() { _ = db.Close() }()
applyDir := func(dir, label string) bool {
addLog(fmt.Sprintf("Applying %s schemas from %s...", label, dir))
applied, err := applySQLFiles(db, filepath.Join("schemas", dir))
for _, f := range applied {
addLog(fmt.Sprintf(" Applied: %s", f))
}
if req.ApplySchema {
addLog("Running database migrations...")
applied, err := migrations.Migrate(db, ws.logger)
if err != nil {
addLog(fmt.Sprintf("ERROR: %s", err))
return false
writeJSON(w, http.StatusOK, map[string]interface{}{"success": false, "log": log})
return
}
addLog(fmt.Sprintf("%s schemas applied (%d files)", label, len(applied)))
return true
addLog(fmt.Sprintf("Schema migrations applied (%d migration(s))", applied))
}
if req.ApplyUpdate {
if !applyDir("update-schema", "update") {
writeJSON(w, http.StatusOK, map[string]interface{}{"success": false, "log": log})
return
}
}
if req.ApplyPatch {
if !applyDir("patch-schema", "patch") {
writeJSON(w, http.StatusOK, map[string]interface{}{"success": false, "log": log})
return
}
}
if req.ApplyBundled {
if !applyDir("bundled-schema", "bundled") {
addLog("Applying bundled data (shops, events, gacha)...")
applied, err := migrations.ApplySeedData(db, ws.logger)
if err != nil {
addLog(fmt.Sprintf("ERROR: %s", err))
writeJSON(w, http.StatusOK, map[string]interface{}{"success": false, "log": log})
return
}
addLog(fmt.Sprintf("Bundled data applied (%d files)", applied))
}
}

View File

@@ -6,10 +6,6 @@ import (
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
)
// clientModes returns all supported client version strings.
@@ -373,71 +369,3 @@ func createDatabase(host string, port int, user, password, dbName string) error
return nil
}
// applyInitSchema loads the init.sql dump (PostgreSQL custom dump format)
// into the target database by shelling out to the pg_restore client tool.
func applyInitSchema(host string, port int, user, password, dbName string) error {
	// The init schema is a custom-format dump, so plain psql cannot load it;
	// pg_restore must be available on the PATH.
	pgRestore, err := exec.LookPath("pg_restore")
	if err != nil {
		return fmt.Errorf("pg_restore not found in PATH: %w (install PostgreSQL client tools)", err)
	}

	schemaPath := filepath.Join("schemas", "init.sql")
	if _, err := os.Stat(schemaPath); err != nil {
		return fmt.Errorf("schema file not found: %s", schemaPath)
	}

	args := []string{
		"--host", host,
		"--port", fmt.Sprint(port),
		"--username", user,
		"--dbname", dbName,
		"--no-owner",
		"--no-privileges",
		schemaPath,
	}
	cmd := exec.Command(pgRestore, args...)
	// Pass the password via PGPASSWORD so it never appears on the command line.
	cmd.Env = append(os.Environ(), fmt.Sprintf("PGPASSWORD=%s", password))

	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("pg_restore failed: %w\n%s", err, string(output))
	}
	return nil
}
// collectSQLFiles returns sorted .sql filenames from a directory.
func collectSQLFiles(dir string) ([]string, error) {
entries, err := os.ReadDir(dir)
if err != nil {
return nil, fmt.Errorf("reading directory %s: %w", dir, err)
}
var files []string
for _, e := range entries {
if !e.IsDir() && strings.HasSuffix(e.Name(), ".sql") {
files = append(files, e.Name())
}
}
sort.Strings(files)
return files, nil
}
// applySQLFiles executes every .sql file found in dir, in sorted order,
// against db. It returns the filenames that ran successfully; on error the
// returned slice still lists the files applied before the failure.
func applySQLFiles(db *sql.DB, dir string) ([]string, error) {
	names, err := collectSQLFiles(dir)
	if err != nil {
		return nil, err
	}

	var applied []string
	for _, name := range names {
		contents, readErr := os.ReadFile(filepath.Join(dir, name))
		if readErr != nil {
			return applied, fmt.Errorf("reading %s: %w", name, readErr)
		}
		if _, execErr := db.Exec(string(contents)); execErr != nil {
			return applied, fmt.Errorf("executing %s: %w", name, execErr)
		}
		applied = append(applied, name)
	}
	return applied, nil
}

View File

@@ -122,9 +122,7 @@ h1{font-size:1.75rem;margin-bottom:.5rem;color:#e94560;text-align:center}
<p style="font-size:.85rem;color:#888;margin-bottom:1rem">Select which schema operations to perform.</p>
<div id="schema-options">
<label class="checkbox" id="chk-create-db-label"><input type="checkbox" id="chk-create-db" checked> Create database</label>
<label class="checkbox"><input type="checkbox" id="chk-init" checked> Apply init schema (pg_restore — required for new databases)</label>
<label class="checkbox"><input type="checkbox" id="chk-update" checked> Apply update schemas</label>
<label class="checkbox"><input type="checkbox" id="chk-patch" checked> Apply patch schemas (development patches)</label>
<label class="checkbox"><input type="checkbox" id="chk-schema" checked> Apply database schema (required for new databases)</label>
<label class="checkbox"><input type="checkbox" id="chk-bundled" checked> Apply bundled data (shops, events, gacha — recommended)</label>
</div>
<button class="btn btn-primary" id="btn-init-db" onclick="initDB()">Initialize Database</button>
@@ -208,9 +206,9 @@ function updateSchemaOptions() {
createCheck.disabled = false;
createLabel.style.opacity = '1';
}
// If tables already exist, uncheck init
// If tables already exist, uncheck schema (migrations will detect and skip)
if (dbTestResult && dbTestResult.tablesExist) {
document.getElementById('chk-init').checked = false;
document.getElementById('chk-schema').checked = false;
}
}
@@ -283,9 +281,7 @@ async function initDB() {
password: document.getElementById('db-password').value,
dbName: document.getElementById('db-name').value,
createDB: document.getElementById('chk-create-db').checked,
applyInit: document.getElementById('chk-init').checked,
applyUpdate: document.getElementById('chk-update').checked,
applyPatch: document.getElementById('chk-patch').checked,
applySchema: document.getElementById('chk-schema').checked,
applyBundled: document.getElementById('chk-bundled').checked,
})
});

View File

@@ -130,44 +130,6 @@ func TestClientModes(t *testing.T) {
}
}
// TestApplySQLFiles exercises the file-discovery path that needs no real
// database: a nonexistent directory must surface an error before any SQL
// would be executed.
func TestApplySQLFiles(t *testing.T) {
	if _, err := applySQLFiles(nil, "/nonexistent/path"); err == nil {
		t.Error("expected error for nonexistent directory")
	}
}
// TestApplySQLFilesOrdering verifies that collectSQLFiles returns names in
// sorted order and leaves non-.sql files out of the result.
func TestApplySQLFilesOrdering(t *testing.T) {
	dir := t.TempDir()
	for _, name := range []string{"03_c.sql", "01_a.sql", "02_b.sql"} {
		if err := os.WriteFile(filepath.Join(dir, name), []byte("-- "+name), 0644); err != nil {
			t.Fatal(err)
		}
	}
	// A non-SQL file in the same directory must be skipped.
	if err := os.WriteFile(filepath.Join(dir, "readme.txt"), []byte("not sql"), 0644); err != nil {
		t.Fatal(err)
	}

	collected, err := collectSQLFiles(dir)
	if err != nil {
		t.Fatalf("collectSQLFiles failed: %v", err)
	}
	if len(collected) != 3 {
		t.Fatalf("got %d files, want 3", len(collected))
	}
	expected := []string{"01_a.sql", "02_b.sql", "03_c.sql"}
	for i, name := range expected {
		if collected[i] != name {
			t.Errorf("file[%d] = %q, want %q", i, collected[i], name)
		}
	}
}
func TestWriteConfig(t *testing.T) {
dir := t.TempDir()
origDir, _ := os.Getwd()