From 9a79494c6877e23679cebb197ccc8e73b210491d Mon Sep 17 00:00:00 2001 From: kdletters Date: Mon, 27 Apr 2026 14:54:26 +0800 Subject: [PATCH 1/8] feat: add spacetimedb json migration tooling --- ...N_STRING_MIGRATION_PROCEDURE_2026-04-27.md | 196 ++++++ scripts/deploy-rust-remote.sh | 66 +- scripts/dev-rust-stack.sh | 45 ++ ...spacetime-authorize-migration-operator.mjs | 35 + scripts/spacetime-export-migration-json.mjs | 55 ++ scripts/spacetime-import-migration-json.mjs | 60 ++ scripts/spacetime-migration-common.mjs | 337 +++++++++ scripts/spacetime-publish-maincloud.sh | 51 ++ .../spacetime-revoke-migration-operator.mjs | 33 + server-rs/Cargo.lock | 1 + server-rs/crates/spacetime-module/Cargo.toml | 1 + server-rs/crates/spacetime-module/src/lib.rs | 6 +- .../crates/spacetime-module/src/migration.rs | 648 ++++++++++++++++++ 13 files changed, 1532 insertions(+), 2 deletions(-) create mode 100644 docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md create mode 100644 scripts/spacetime-authorize-migration-operator.mjs create mode 100644 scripts/spacetime-export-migration-json.mjs create mode 100644 scripts/spacetime-import-migration-json.mjs create mode 100644 scripts/spacetime-migration-common.mjs create mode 100644 scripts/spacetime-revoke-migration-operator.mjs create mode 100644 server-rs/crates/spacetime-module/src/migration.rs diff --git a/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md new file mode 100644 index 00000000..51b56153 --- /dev/null +++ b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md @@ -0,0 +1,196 @@ +# SpacetimeDB JSON 字符串迁移 procedure 设计 + +## 背景 + +`spacetime sql` 只能稳定读取 public 表或数据库 owner 可见表。当前 `ai_result_reference` 等运行真相表保持 private,直接 SQL 导出会遇到 `no such table` 或 private table 提示,不能作为跨服务器迁移的稳定方案。 + +SpacetimeDB reducer 必须保持确定性,不能访问文件系统和网络。procedure 可以返回数据,也可以在事务中读取 private 表,因此迁移改为: + +1. 
`spacetime-module` 内的导出 procedure 读取迁移白名单表,并直接返回迁移 JSON 字符串。 +2. Node 运维脚本默认通过 `spacetime call` 调用导出 procedure,把返回的 JSON 字符串写入本地文件。 +3. Node 运维脚本读取本地 JSON 文件内容,并通过 HTTP request body 作为字符串参数传给导入 procedure。 +4. 导入 procedure 校验 JSON 与表白名单后,在事务中写入目标数据库。 + +procedure 不再访问 HTTP 文件桥,也不接收部署机本地文件路径。这样可以避开 SpacetimeDB 对 private/special-purpose 地址的 HTTP 访问限制,并避免把 private 表内容通过临时 HTTP 服务转发。 + +`spacetime login show --token` 输出的是 CLI 登录 token,不是 HTTP `/v1/database/.../call` 所需的数据库连接 token。运维脚本默认走 CLI 登录态,迁移时不要把 CLI token 传给 `--token`;只有显式传 `--use-http` 时才需要数据库连接 token。 + +## 接口 + +### 迁移操作员授权 + +迁移 procedure 会读取并写入 private 表,不能对任意登录身份开放。模块内新增私有表 `database_migration_operator` 作为迁移操作员白名单: + +- `operator_identity`: 被授权调用迁移 procedure 的 SpacetimeDB identity。 +- `created_at`: 授权写入时间。 +- `created_by`: 发起授权的 identity。 +- `note`: 运维备注,只用于区分来源、环境或临时用途。 + +`database_migration_operator` 只控制迁移 procedure 调用权限,不会被导出或导入,避免把源库的运维权限复制到目标库。 + +首次授权时,操作员表为空,必须通过编译进模块的 `GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET` 引导密钥授权第一位操作员。发布脚本会在构建或发布 SpacetimeDB 模块时自动生成一份强随机引导密钥、注入 wasm 编译环境,并在控制台显示;运维人员必须记录对应数据库本次发布输出的密钥。表内已经存在操作员后,后续授权与撤销只能由已有操作员发起;此时不再接受引导密钥越权扩权。 + +新增 procedure: + +- `authorize_database_migration_operator`: 授权或更新迁移操作员备注。 +- `revoke_database_migration_operator`: 撤销迁移操作员。 + +运维流程: + +```bash +npm run spacetime:publish:maincloud -- --database +# 控制台会输出: +# [spacetime:maincloud] 迁移引导密钥: <本次发布随机密钥> +``` + +发布完成后,在同一台机器上用当前 `spacetime login` 身份授权操作员: + +```bash +node scripts/spacetime-authorize-migration-operator.mjs \ + --server maincloud \ + --database xushi-p4wfr \ + --bootstrap-secret <本次发布随机密钥> \ + --operator-identity \ + --note "2026-04-27 migration" +``` + +迁移完成后可以撤销临时操作员: + +```bash +node scripts/spacetime-revoke-migration-operator.mjs \ + --server maincloud \ + --database xushi-p4wfr \ + --operator-identity +``` + +生产环境建议迁移完成后用 `--no-migration-bootstrap-secret` 重新发布一个未设置 `GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET` 的模块版本,避免引导密钥长期留在 wasm 中。 + +### 发布脚本密钥行为 + +当前所有会构建或发布 
`spacetime-module` 的脚本默认都会生成并显示迁移引导密钥: + +- `npm run spacetime:publish:maincloud`:在本机 `cargo build` 前生成密钥,控制台输出 `[spacetime:maincloud] 迁移引导密钥: ...`。 +- `npm run dev:rust`:在本地 `spacetime publish --module-path` 前生成密钥,控制台输出 `[dev:rust] 迁移引导密钥: ...`。 +- `npm run deploy:rust:remote`:在构建发布包 wasm 前生成密钥,控制台输出 `[deploy:rust] 迁移引导密钥: ...`,并把同一份密钥写入发布包根目录的 `migration-bootstrap-secret.txt`。服务器执行 `./start.sh` 发布 wasm 时也会再次显示该文件里的密钥。 + +如果迁移完成后不希望 wasm 继续携带引导密钥,重新发布时传 `--no-migration-bootstrap-secret`。远端发布包若使用 `--skip-spacetime-build`,必须同时传 `--no-migration-bootstrap-secret`,否则脚本会拒绝生成一个无法注入旧 wasm 的新密钥。 + +### 导出 + +`export_database_migration_to_file(ctx, input)` + +输入字段: + +- `include_tables`: 可选表名白名单。为空时导出当前实现支持的全部迁移表。 + +返回字段: + +- `ok`: 是否成功。 +- `schema_version`: 迁移 JSON 结构版本。 +- `migration_json`: 成功时包含完整迁移 JSON 字符串,失败时为空。 +- `table_stats`: 表级导出统计。 +- `error_message`: 失败原因。 + +### 导入 + +`import_database_migration_from_file(ctx, input)` + +输入字段: + +- `migration_json`: 导出 procedure 生成的完整迁移 JSON 字符串。 +- `include_tables`: 可选表名白名单。为空时导入文件内所有支持表。 +- `replace_existing`: 是否先清空目标表。跨服务器全量迁移必须为 `true`。 +- `dry_run`: 只解析和统计,不写表。 + +返回字段: + +- `ok`: 是否成功。 +- `schema_version`: 迁移 JSON 结构版本。 +- `migration_json`: 导入场景恒为空,避免重复回传大 JSON。 +- `table_stats`: 表级导入或跳过统计。 +- `error_message`: 失败原因。 + +保留 `export_database_migration_to_file` / `import_database_migration_from_file` 名称,是为了减少已经记住的 procedure 名变更;语义上不再代表 module 直接读写文件。 + +## Node 脚本 + +本机导出时,先确保本机 SpacetimeDB 服务和源数据库可访问,然后授权本机调用身份: + +```bash +node scripts/spacetime-authorize-migration-operator.mjs \ + --server dev \ + --database xushi-p4wfr \ + --bootstrap-secret <本机源库发布时输出的随机密钥> \ + --operator-identity <本机 spacetime login show 中的 identity> \ + --note "local export" +``` + +导出脚本负责调用本机源库 procedure 并保存返回 JSON: + +```bash +node scripts/spacetime-export-migration-json.mjs \ + --server dev \ + --database xushi-p4wfr \ + --out tmp/spacetime-migrations/source-2026-04-27.json +``` + +把 `tmp/spacetime-migrations/source-2026-04-27.json` 
复制到服务器后,在服务器上登录目标 SpacetimeDB,并授权服务器侧调用身份: + +```bash +node scripts/spacetime-authorize-migration-operator.mjs \ + --server maincloud \ + --database xushi-p4wfr \ + --bootstrap-secret <服务器目标库发布时输出的随机密钥> \ + --operator-identity <服务器 spacetime login show 中的 identity> \ + --note "server import" +``` + +导入脚本负责读取服务器本地文件并把 JSON 字符串传入目标库 procedure: + +```bash +node scripts/spacetime-import-migration-json.mjs \ + --server maincloud \ + --database xushi-p4wfr \ + --in tmp/spacetime-migrations/source-2026-04-27.json \ + --replace-existing +``` + +正式导入前建议先加 `--dry-run`,确认 JSON 可解析、版本匹配、表名都在迁移白名单内。 + +如需分批迁移,可用逗号分隔表名: + +```bash +node scripts/spacetime-export-migration-json.mjs \ + --database xushi-p4wfr \ + --out tmp/spacetime-migrations/ai.json \ + --include ai_task,ai_task_stage,ai_text_chunk,ai_result_reference +``` + +`--server` 支持 `dev`、`local`、`maincloud`,也可以直接传 SpacetimeDB 服务器 URL。脚本默认走 `spacetime call`,使用当前机器的 CLI 登录态。数据库名可通过 `--database`、`GENARRATIVE_SPACETIME_MAINCLOUD_DATABASE` 或 `GENARRATIVE_SPACETIME_DATABASE` 提供。 + +授权脚本额外支持: + +- `--bootstrap-secret` 或 `GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET` +- `--operator-identity` 或 `GENARRATIVE_SPACETIME_MIGRATION_OPERATOR_IDENTITY` +- `--note` + +## 表范围 + +首版覆盖当前 private table 报错相关与主运行真相表: + +- 认证:`auth_store_snapshot`、`user_account`、`auth_identity`、`refresh_session` +- AI:`ai_task`、`ai_task_stage`、`ai_text_chunk`、`ai_result_reference` +- 运行存档与账户投影:`runtime_snapshot`、`runtime_setting`、`user_browse_history`、`profile_dashboard_state`、`profile_wallet_ledger`、`profile_invite_code`、`profile_referral_relation`、`profile_played_world`、`profile_membership`、`profile_recharge_order`、`profile_save_archive` +- RPG 运行真相:`player_progression`、`chapter_progression`、`npc_state`、`story_session`、`story_event`、`inventory_slot`、`battle_state`、`treasure_record`、`quest_record`、`quest_log` +- 
自定义世界:`custom_world_profile`、`custom_world_session`、`custom_world_agent_session`、`custom_world_agent_message`、`custom_world_agent_operation`、`custom_world_draft_card`、`custom_world_gallery_entry` +- 资产索引:`asset_object`、`asset_entity_binding` +- 拼图:`puzzle_agent_session`、`puzzle_agent_message`、`puzzle_work_profile`、`puzzle_runtime_run` +- 大鱼:`big_fish_creation_session`、`big_fish_agent_message`、`big_fish_asset_slot`、`big_fish_runtime_run` + +后续新增 SpacetimeDB 表时,必须同步把表加入迁移白名单与本文档。 + +## 风险与限制 + +迁移 JSON 作为 procedure 返回值和 HTTP request body 传递,会受 SpacetimeDB 调用响应体、请求体以及中间代理大小限制。数据量较大时,先按 `include_tables` 分批迁移;若单表本身过大,再补充分片 procedure,而不是恢复 HTTP 文件桥。 + +`spacetime call` 在 PowerShell 中手写 JSON 容易被剥掉双引号。推荐使用仓库里的 Node 脚本,由脚本直接走 HTTP API,避免 shell 二次处理和命令行长度限制。 diff --git a/scripts/deploy-rust-remote.sh b/scripts/deploy-rust-remote.sh index a8b8b822..954bde1d 100644 --- a/scripts/deploy-rust-remote.sh +++ b/scripts/deploy-rust-remote.sh @@ -27,7 +27,8 @@ usage() { --skip-upload 只生成本地发布包,不上传服务器 --skip-web-build 跳过 Vite 构建,仅用于调试 --skip-api-build 跳过 api-server 构建,仅用于调试 - --skip-spacetime-build 跳过 wasm 构建,仅用于调试 + --skip-spacetime-build 跳过 wasm 构建,仅用于调试;此时必须同时传 --no-migration-bootstrap-secret + --no-migration-bootstrap-secret 构建不带迁移引导密钥的 spacetime-module wasm 目标服务器要求: Ubuntu x86_64,已安装 node、spacetime CLI,并允许执行目标目录内的 start.sh / stop.sh。 @@ -127,6 +128,36 @@ replace_placeholder_in_file() { sed -i "s|${placeholder}|${escaped_value}|g" "${file_path}" } +generate_migration_bootstrap_secret() { + node -e 'const crypto = require("crypto"); process.stdout.write(crypto.randomBytes(32).toString("hex"));' +} + +prepare_migration_bootstrap_secret() { + case "${MIGRATION_BOOTSTRAP_SECRET_MODE}" in + auto) + MIGRATION_BOOTSTRAP_SECRET="$(generate_migration_bootstrap_secret)" + ;; + manual) + if [[ "${#MIGRATION_BOOTSTRAP_SECRET}" -lt 16 ]]; then + echo "[deploy:rust] 迁移引导密钥至少需要 16 个字符。" >&2 + exit 1 + fi + ;; + disabled) + unset GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET + echo 
"[deploy:rust] 未启用迁移引导密钥。" + return + ;; + *) + echo "[deploy:rust] 未知迁移引导密钥模式: ${MIGRATION_BOOTSTRAP_SECRET_MODE}" >&2 + exit 1 + ;; + esac + + export GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET="${MIGRATION_BOOTSTRAP_SECRET}" + echo "[deploy:rust] 迁移引导密钥: ${MIGRATION_BOOTSTRAP_SECRET}" +} + SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(cd -- "${SCRIPT_DIR}/.." && pwd)" SERVER_RS_DIR="${REPO_ROOT}/server-rs" @@ -147,6 +178,8 @@ SKIP_WEB_BUILD=0 SKIP_API_BUILD=0 SKIP_SPACETIME_BUILD=0 BUILD_COMPLETED=0 +MIGRATION_BOOTSTRAP_SECRET="" +MIGRATION_BOOTSTRAP_SECRET_MODE="auto" while [[ $# -gt 0 ]]; do case "$1" in @@ -214,6 +247,16 @@ while [[ $# -gt 0 ]]; do SKIP_SPACETIME_BUILD=1 shift ;; + --migration-bootstrap-secret) + MIGRATION_BOOTSTRAP_SECRET="${2:?缺少 --migration-bootstrap-secret 的值}" + MIGRATION_BOOTSTRAP_SECRET_MODE="manual" + shift 2 + ;; + --no-migration-bootstrap-secret) + MIGRATION_BOOTSTRAP_SECRET="" + MIGRATION_BOOTSTRAP_SECRET_MODE="disabled" + shift + ;; *) echo "[deploy:rust] 未知参数: $1" >&2 usage >&2 @@ -227,6 +270,12 @@ if [[ ! 
"${BUILD_NAME}" =~ ^[0-9A-Za-z._-]+$ ]]; then exit 1 fi +if [[ "${SKIP_SPACETIME_BUILD}" -eq 1 && "${MIGRATION_BOOTSTRAP_SECRET_MODE}" != "disabled" ]]; then + echo "[deploy:rust] --skip-spacetime-build 无法把迁移引导密钥注入 wasm。" >&2 + echo "[deploy:rust] 请移除 --skip-spacetime-build,或同时传 --no-migration-bootstrap-secret。" >&2 + exit 1 +fi + TARGET_DIR="${BUILD_ROOT}/${BUILD_NAME}" WEB_DIR="${TARGET_DIR}/web" API_BINARY_SOURCE="${SERVER_RS_DIR}/target/x86_64-unknown-linux-gnu/release/api-server" @@ -249,6 +298,8 @@ fi require_command node require_command cargo +prepare_migration_bootstrap_secret + if [[ "${SKIP_WEB_BUILD}" -ne 1 ]]; then require_command npm fi @@ -310,6 +361,11 @@ fi copy_required_file "${WASM_SOURCE}" "${TARGET_DIR}/spacetime_module.wasm" "spacetime-module wasm" +if [[ "${MIGRATION_BOOTSTRAP_SECRET_MODE}" != "disabled" ]]; then + printf "%s\n" "${MIGRATION_BOOTSTRAP_SECRET}" >"${TARGET_DIR}/migration-bootstrap-secret.txt" + chmod 600 "${TARGET_DIR}/migration-bootstrap-secret.txt" +fi + cat >"${TARGET_DIR}/web-server.mjs" <<'WEB_SERVER' import http from 'node:http'; import fs from 'node:fs'; @@ -529,6 +585,7 @@ API_PORT="${GENARRATIVE_API_PORT:-__GENARRATIVE_DEFAULT_API_PORT__}" API_LOG="${GENARRATIVE_API_LOG:-info,tower_http=info}" WEB_HOST="${GENARRATIVE_WEB_HOST:-__GENARRATIVE_DEFAULT_WEB_HOST__}" WEB_PORT="${GENARRATIVE_WEB_PORT:-__GENARRATIVE_DEFAULT_WEB_PORT__}" +MIGRATION_BOOTSTRAP_SECRET_FILE="${SCRIPT_DIR}/migration-bootstrap-secret.txt" # 日志默认落文件,显式关闭 ANSI 颜色码,避免控制字符写入 *.log。 export NO_COLOR="${NO_COLOR:-1}" @@ -778,6 +835,11 @@ if [[ "${CLEAR_DATABASE}" -eq 1 ]]; then fi echo "[start] 发布 SpacetimeDB wasm: ${SPACETIME_DATABASE}" +if [[ -f "${MIGRATION_BOOTSTRAP_SECRET_FILE}" ]]; then + echo "[start] 迁移引导密钥: $(cat "${MIGRATION_BOOTSTRAP_SECRET_FILE}")" +else + echo "[start] 未启用迁移引导密钥。" +fi if ! 
spacetime --root-dir="${SPACETIME_ROOT_DIR}" "${PUBLISH_ARGS[@]}"; then echo "[start] SpacetimeDB 发布失败。" >&2 echo "[start] 如果错误包含 403 Forbidden 或 is not authorized,通常是当前 CLI 身份无权更新目标数据库。" >&2 @@ -868,6 +930,7 @@ cat >"${TARGET_DIR}/README.md" <"${TARGET_DIR}/README.md" <&2 + exit 1 + fi + ;; + disabled) + unset GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET + echo "[dev:rust] 未启用迁移引导密钥。" + return + ;; + *) + echo "[dev:rust] 未知迁移引导密钥模式: ${MIGRATION_BOOTSTRAP_SECRET_MODE}" >&2 + exit 1 + ;; + esac + + export GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET="${MIGRATION_BOOTSTRAP_SECRET}" + echo "[dev:rust] 迁移引导密钥: ${MIGRATION_BOOTSTRAP_SECRET}" +} + SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(cd -- "${SCRIPT_DIR}/.." && pwd)" SERVER_RS_DIR="${REPO_ROOT}/server-rs" @@ -244,6 +276,8 @@ API_SERVER_TIMEOUT_SECONDS="300" SKIP_SPACETIME=0 SKIP_PUBLISH=0 PRESERVE_DATABASE=0 +MIGRATION_BOOTSTRAP_SECRET="" +MIGRATION_BOOTSTRAP_SECRET_MODE="auto" PIDS=() NAMES=() @@ -334,6 +368,16 @@ while [[ $# -gt 0 ]]; do PRESERVE_DATABASE=1 shift ;; + --migration-bootstrap-secret) + MIGRATION_BOOTSTRAP_SECRET="${2:?缺少 --migration-bootstrap-secret 的值}" + MIGRATION_BOOTSTRAP_SECRET_MODE="manual" + shift 2 + ;; + --no-migration-bootstrap-secret) + MIGRATION_BOOTSTRAP_SECRET="" + MIGRATION_BOOTSTRAP_SECRET_MODE="disabled" + shift + ;; *) echo "[dev:rust] 未知参数: $1" >&2 usage >&2 @@ -417,6 +461,7 @@ fi if [[ "${SKIP_PUBLISH}" -ne 1 ]]; then echo "[dev:rust] 等待 SpacetimeDB 就绪" wait_for_spacetime "${SPACETIME_SERVER}" "${SPACETIME_TIMEOUT_SECONDS}" "${SPACETIME_ROOT_DIR}" "${PIDS[0]:-}" + prepare_migration_bootstrap_secret PUBLISH_ARGS=( publish diff --git a/scripts/spacetime-authorize-migration-operator.mjs b/scripts/spacetime-authorize-migration-operator.mjs new file mode 100644 index 00000000..82a074c5 --- /dev/null +++ b/scripts/spacetime-authorize-migration-operator.mjs @@ -0,0 +1,35 @@ +#!/usr/bin/env node + +import { + callSpacetimeProcedureViaCli, + 
ensureProcedureOk, + parseArgs, +} from './spacetime-migration-common.mjs'; + +try { + const options = parseArgs(process.argv.slice(2)); + if (!options.operatorIdentity) { + throw new Error('必须传入 --operator-identity。'); + } + + const input = { + bootstrap_secret: options.bootstrapSecret || '', + operator_identity_hex: options.operatorIdentity, + note: options.note || '', + }; + const result = await callSpacetimeProcedureViaCli( + options, + 'authorize_database_migration_operator', + input, + ); + ensureProcedureOk(result); + + console.log( + `[spacetime:migration:operator] 已授权 ${result.operator_identity_hex ?? options.operatorIdentity}`, + ); +} catch (error) { + console.error( + `[spacetime:migration:operator] ${error instanceof Error ? error.message : String(error)}`, + ); + process.exit(1); +} diff --git a/scripts/spacetime-export-migration-json.mjs b/scripts/spacetime-export-migration-json.mjs new file mode 100644 index 00000000..357bdf6a --- /dev/null +++ b/scripts/spacetime-export-migration-json.mjs @@ -0,0 +1,55 @@ +#!/usr/bin/env node + +import { writeFile } from 'node:fs/promises'; +import path from 'node:path'; +import { + callSpacetimeProcedureAuto, + ensureParentDir, + ensureProcedureOk, + parseArgs, +} from './spacetime-migration-common.mjs'; + +try { + const options = parseArgs(process.argv.slice(2)); + if (!options.out) { + throw new Error('必须传入 --out。'); + } + + const input = { + include_tables: options.includeTables, + }; + const result = await callSpacetimeProcedureAuto( + options, + 'export_database_migration_to_file', + input, + ); + ensureProcedureOk(result); + + if (typeof result.migration_json !== 'string' || result.migration_json.trim() === '') { + throw new Error('导出 procedure 没有返回 migration_json。'); + } + + const outPath = path.resolve(options.out); + await ensureParentDir(outPath); + await writeFile(outPath, result.migration_json, 'utf8'); + + console.log(`[spacetime:migration:export] 已写入 ${outPath}`); + 
printTableStats(result.table_stats); +} catch (error) { + console.error( + `[spacetime:migration:export] ${error instanceof Error ? error.message : String(error)}`, + ); + process.exit(1); +} + +function printTableStats(tableStats) { + if (!Array.isArray(tableStats) || tableStats.length === 0) { + return; + } + + const rows = tableStats.map((stat) => ({ + table: stat.table_name, + exported: stat.exported_row_count, + })); + console.table(rows); +} diff --git a/scripts/spacetime-import-migration-json.mjs b/scripts/spacetime-import-migration-json.mjs new file mode 100644 index 00000000..2b3b25e7 --- /dev/null +++ b/scripts/spacetime-import-migration-json.mjs @@ -0,0 +1,60 @@ +#!/usr/bin/env node + +import { readFile } from 'node:fs/promises'; +import path from 'node:path'; +import { + assertReadableFile, + callSpacetimeProcedureAuto, + ensureProcedureOk, + parseArgs, +} from './spacetime-migration-common.mjs'; + +try { + const options = parseArgs(process.argv.slice(2)); + if (!options.in) { + throw new Error('必须传入 --in。'); + } + + const inPath = path.resolve(options.in); + await assertReadableFile(inPath); + const migrationJson = await readFile(inPath, 'utf8'); + if (!migrationJson.trim()) { + throw new Error(`迁移文件为空: ${inPath}`); + } + + const input = { + migration_json: migrationJson, + include_tables: options.includeTables, + replace_existing: options.replaceExisting === true, + dry_run: options.dryRun === true, + }; + const result = await callSpacetimeProcedureAuto( + options, + 'import_database_migration_from_file', + input, + ); + ensureProcedureOk(result); + + console.log( + `[spacetime:migration:import] ${options.dryRun ? 'dry-run 完成' : '导入完成'}: ${inPath}`, + ); + printTableStats(result.table_stats); +} catch (error) { + console.error( + `[spacetime:migration:import] ${error instanceof Error ? 
error.message : String(error)}`, + ); + process.exit(1); +} + +function printTableStats(tableStats) { + if (!Array.isArray(tableStats) || tableStats.length === 0) { + return; + } + + const rows = tableStats.map((stat) => ({ + table: stat.table_name, + imported: stat.imported_row_count, + skipped: stat.skipped_row_count, + })); + console.table(rows); +} diff --git a/scripts/spacetime-migration-common.mjs b/scripts/spacetime-migration-common.mjs new file mode 100644 index 00000000..44f4bb78 --- /dev/null +++ b/scripts/spacetime-migration-common.mjs @@ -0,0 +1,337 @@ +import { spawn } from 'node:child_process'; +import { access, mkdir } from 'node:fs/promises'; +import path from 'node:path'; + +export function parseArgs(argv) { + const options = { + database: + process.env.GENARRATIVE_SPACETIME_MAINCLOUD_DATABASE || + process.env.GENARRATIVE_SPACETIME_DATABASE || + '', + bootstrapSecret: process.env.GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET || '', + includeTables: [], + operatorIdentity: process.env.GENARRATIVE_SPACETIME_MIGRATION_OPERATOR_IDENTITY || '', + passthrough: [], + note: '', + server: + process.env.GENARRATIVE_SPACETIME_MAINCLOUD_SERVER || + process.env.GENARRATIVE_SPACETIME_SERVER || + '', + serverUrl: + process.env.GENARRATIVE_SPACETIME_MAINCLOUD_SERVER_URL || + process.env.GENARRATIVE_SPACETIME_SERVER_URL || + '', + token: + process.env.GENARRATIVE_SPACETIME_MAINCLOUD_TOKEN || + process.env.GENARRATIVE_SPACETIME_TOKEN || + '', + }; + + for (let index = 0; index < argv.length; index += 1) { + const arg = argv[index]; + const readValue = (name) => { + const value = argv[index + 1]; + if (!value || value.startsWith('--')) { + throw new Error(`${name} 缺少参数值。`); + } + index += 1; + return value; + }; + + if (arg === '--server') { + options.server = readValue(arg); + } else if (arg === '--use-http') { + options.useHttp = true; + } else if (arg === '--server-url') { + options.serverUrl = readValue(arg); + } else if (arg === '--token') { + options.token = 
readValue(arg); + } else if (arg === '--bootstrap-secret') { + options.bootstrapSecret = readValue(arg); + } else if (arg === '--operator-identity') { + options.operatorIdentity = readValue(arg); + } else if (arg === '--note') { + options.note = readValue(arg); + } else if (arg === '--root-dir') { + options.rootDir = readValue(arg); + } else if (arg === '--database') { + options.database = readValue(arg); + } else if (arg === '--out') { + options.out = readValue(arg); + } else if (arg === '--in') { + options.in = readValue(arg); + } else if (arg === '--include') { + options.includeTables = readValue(arg) + .split(',') + .map((value) => value.trim()) + .filter(Boolean); + } else if (arg === '--replace-existing') { + options.replaceExisting = true; + } else if (arg === '--dry-run') { + options.dryRun = true; + } else if (arg === '--anonymous' || arg === '--no-config') { + options.passthrough.push(arg); + } else { + throw new Error(`未知参数: ${arg}`); + } + } + + return options; +} + +export function buildSpacetimeCallArgs(options, procedureName, input) { + if (!options.database) { + throw new Error('必须传入 --database。'); + } + + const args = []; + if (options.rootDir) { + args.push(`--root-dir=${options.rootDir}`); + } + args.push('call'); + if (options.server) { + args.push('-s', options.server); + } + args.push(...options.passthrough); + args.push(options.database, procedureName, JSON.stringify(input), '-y'); + return args; +} + +export async function callSpacetimeProcedure(options, procedureName, input) { + if (!options.database) { + throw new Error('必须传入 --database,或设置 GENARRATIVE_SPACETIME_DATABASE。'); + } + + const serverUrl = resolveServerUrl(options).replace(/\/+$/u, ''); + const url = `${serverUrl}/v1/database/${encodeURIComponent(options.database)}/call/${encodeURIComponent(procedureName)}`; + const headers = { + 'content-type': 'application/json; charset=utf-8', + }; + if (options.token) { + headers.authorization = `Bearer ${options.token}`; + } + + let 
response; + try { + response = await fetch(url, { + method: 'POST', + headers, + body: JSON.stringify([input]), + }); + } catch (error) { + throw new Error( + `SpacetimeDB HTTP 请求失败: ${url}; ${error instanceof Error ? error.message : String(error)}`, + ); + } + const text = await response.text(); + if (!response.ok) { + throw new Error( + `SpacetimeDB HTTP ${response.status}: ${trimPreview(text)}${buildHttpAuthHint(text)}`, + ); + } + + return parseProcedureResult(text); +} + +export async function callSpacetimeProcedureAuto(options, procedureName, input) { + if (options.useHttp) { + return callSpacetimeProcedure(options, procedureName, input); + } + + return callSpacetimeProcedureViaCli(options, procedureName, input); +} + +export async function callSpacetimeProcedureViaCli(options, procedureName, input) { + const args = buildSpacetimeCallArgs(options, procedureName, input); + const output = await runSpacetimeCli(args); + return parseProcedureResult(output); +} + +export function parseProcedureResult(output) { + const candidates = []; + const trimmed = output.trim(); + if (trimmed) { + candidates.push(trimmed); + } + + for (const line of output.split(/\r?\n/u)) { + const value = line.trim(); + if (value.startsWith('{') || value.startsWith('[')) { + candidates.push(value); + } + } + + for (const candidate of candidates) { + try { + return normalizeProcedureResult(JSON.parse(candidate)); + } catch { + // SpacetimeDB CLI 在不同版本中可能附带说明文本,继续尝试后续候选。 + } + } + + throw new Error(`无法解析 procedure 返回值: ${trimmed}`); +} + +export function ensureProcedureOk(result) { + if (!result.ok) { + throw new Error(result.error_message ?? 
'迁移 procedure 返回失败。'); + } +} + +export async function ensureParentDir(filePath) { + await mkdir(path.dirname(path.resolve(filePath)), { recursive: true }); +} + +export async function assertReadableFile(filePath) { + await access(path.resolve(filePath)); +} + +function normalizeProcedureResult(value) { + if (value && typeof value === 'object' && !Array.isArray(value)) { + return value; + } + + if (Array.isArray(value)) { + return normalizeSatsProduct(value); + } + + throw new Error('procedure 返回值不是对象。'); +} + +function normalizeSatsProduct(value) { + if (value.length === 3) { + return { + ok: normalizeSatsValue(value[0]), + operator_identity_hex: normalizeSatsOption(value[1]), + error_message: normalizeSatsOption(value[2]), + }; + } + + return { + ok: normalizeSatsValue(value[0]), + schema_version: normalizeSatsValue(value[1]), + migration_json: normalizeSatsOption(value[2]), + table_stats: normalizeTableStats(value[3]), + error_message: normalizeSatsOption(value[4]), + }; +} + +function normalizeSatsValue(value) { + if (Array.isArray(value)) { + return value.map((item) => normalizeSatsValue(item)); + } + + if (value && typeof value === 'object') { + return Object.fromEntries( + Object.entries(value).map(([key, entry]) => [key, normalizeSatsValue(entry)]), + ); + } + + return value; +} + +function normalizeSatsOption(value) { + if (Array.isArray(value)) { + if (value.length === 2 && value[0] === 0) { + return normalizeSatsValue(value[1]); + } + if (value.length === 0 || value[0] === 1) { + return null; + } + } + + return normalizeSatsValue(value); +} + +function normalizeTableStats(value) { + if (!Array.isArray(value)) { + return []; + } + + return value.map((entry) => { + if (entry && typeof entry === 'object' && !Array.isArray(entry)) { + return normalizeSatsValue(entry); + } + + if (Array.isArray(entry)) { + return { + table_name: normalizeSatsValue(entry[0]), + exported_row_count: normalizeSatsValue(entry[1]), + imported_row_count: 
normalizeSatsValue(entry[2]), + skipped_row_count: normalizeSatsValue(entry[3]), + }; + } + + return entry; + }); +} + +function resolveServerUrl(options) { + if (options.serverUrl) { + return options.serverUrl; + } + + const server = (options.server || 'maincloud').trim(); + if (server.startsWith('http://') || server.startsWith('https://')) { + return server; + } + if (server === 'dev') { + return 'http://127.0.0.1:3101'; + } + if (server === 'local') { + return 'http://127.0.0.1:3000'; + } + if (!server || server === 'maincloud') { + return 'https://maincloud.spacetimedb.com'; + } + + throw new Error(`未知 SpacetimeDB server: ${server}。请改用 --server-url 显式传入地址。`); +} + +function trimPreview(text) { + const trimmed = text.trim(); + if (trimmed.length <= 4000) { + return trimmed; + } + + return `${trimmed.slice(0, 4000)}...`; +} + +function buildHttpAuthHint(text) { + if (!text.includes('InvalidSignature') && !text.includes('TokenError')) { + return ''; + } + + return '。提示:这里需要 SpacetimeDB 客户端连接 token,不是 `spacetime login show --token` 输出的 CLI 登录 token;授权/撤销请直接使用 CLI 登录态,不要传 --token。'; +} + +function runSpacetimeCli(args) { + return new Promise((resolve, reject) => { + const child = spawn('spacetime', args, { + cwd: process.cwd(), + shell: false, + stdio: ['ignore', 'pipe', 'pipe'], + }); + let output = ''; + + child.stdout.on('data', (chunk) => { + output += chunk.toString(); + }); + child.stderr.on('data', (chunk) => { + output += chunk.toString(); + }); + child.on('error', reject); + child.on('exit', (code, signal) => { + if (signal) { + reject(new Error(`spacetime call 被信号中断: ${signal}`)); + return; + } + if (code !== 0) { + reject(new Error(`spacetime call 失败,退出码 ${code}: ${trimPreview(output)}`)); + return; + } + + resolve(output); + }); + }); +} diff --git a/scripts/spacetime-publish-maincloud.sh b/scripts/spacetime-publish-maincloud.sh index 805f67d9..319e2a94 100644 --- a/scripts/spacetime-publish-maincloud.sh +++ b/scripts/spacetime-publish-maincloud.sh @@ 
-7,6 +7,8 @@ SERVER_RS_DIR="${REPO_ROOT}/server-rs" MODULE_PATH="${SERVER_RS_DIR}/target/wasm32-unknown-unknown/release/spacetime_module.wasm" SPACETIME_SERVER_ALIAS="maincloud" CLEAR_DATABASE=0 +MIGRATION_BOOTSTRAP_SECRET="" +MIGRATION_BOOTSTRAP_SECRET_MODE="auto" load_env_file() { local env_file="$1" @@ -39,13 +41,45 @@ usage() { npm run spacetime:publish:maincloud npm run spacetime:publish:maincloud -- --database npm run spacetime:publish:maincloud -- --clear-database + npm run spacetime:publish:maincloud -- --no-migration-bootstrap-secret 说明: 发布 server-rs/crates/spacetime-module 到 SpacetimeDB Maincloud。 数据库名优先读取 --database,其次读取 GENARRATIVE_SPACETIME_MAINCLOUD_DATABASE。 + 默认在构建 wasm 前随机生成迁移引导密钥,注入 GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET 并显示在控制台。 EOF } +generate_migration_bootstrap_secret() { + node -e 'const crypto = require("crypto"); process.stdout.write(crypto.randomBytes(32).toString("hex"));' +} + +prepare_migration_bootstrap_secret() { + case "${MIGRATION_BOOTSTRAP_SECRET_MODE}" in + auto) + MIGRATION_BOOTSTRAP_SECRET="$(generate_migration_bootstrap_secret)" + ;; + manual) + if [[ "${#MIGRATION_BOOTSTRAP_SECRET}" -lt 16 ]]; then + echo "[spacetime:maincloud] 迁移引导密钥至少需要 16 个字符。" >&2 + exit 1 + fi + ;; + disabled) + unset GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET + echo "[spacetime:maincloud] 未启用迁移引导密钥。" + return + ;; + *) + echo "[spacetime:maincloud] 未知迁移引导密钥模式: ${MIGRATION_BOOTSTRAP_SECRET_MODE}" >&2 + exit 1 + ;; + esac + + export GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET="${MIGRATION_BOOTSTRAP_SECRET}" + echo "[spacetime:maincloud] 迁移引导密钥: ${MIGRATION_BOOTSTRAP_SECRET}" +} + load_env_file "${REPO_ROOT}/.env" load_env_file "${REPO_ROOT}/.env.local" @@ -70,6 +104,16 @@ while [[ $# -gt 0 ]]; do CLEAR_DATABASE=1 shift ;; + --migration-bootstrap-secret) + MIGRATION_BOOTSTRAP_SECRET="${2:?缺少 --migration-bootstrap-secret 的值}" + MIGRATION_BOOTSTRAP_SECRET_MODE="manual" + shift 2 + ;; + --no-migration-bootstrap-secret) + 
MIGRATION_BOOTSTRAP_SECRET="" + MIGRATION_BOOTSTRAP_SECRET_MODE="disabled" + shift + ;; *) echo "[spacetime:maincloud] 未知参数: $1" >&2 usage >&2 @@ -89,11 +133,18 @@ if ! command -v cargo >/dev/null 2>&1; then exit 1 fi +if ! command -v node >/dev/null 2>&1; then + echo "[spacetime:maincloud] 缺少 node 命令,无法生成迁移引导密钥。" >&2 + exit 1 +fi + if ! command -v spacetime >/dev/null 2>&1; then echo "[spacetime:maincloud] 缺少 spacetime CLI,请先安装并登录 Maincloud。" >&2 exit 1 fi +prepare_migration_bootstrap_secret + echo "[spacetime:maincloud] 构建 spacetime-module wasm" cargo build \ --manifest-path "${SERVER_RS_DIR}/Cargo.toml" \ diff --git a/scripts/spacetime-revoke-migration-operator.mjs b/scripts/spacetime-revoke-migration-operator.mjs new file mode 100644 index 00000000..71f72058 --- /dev/null +++ b/scripts/spacetime-revoke-migration-operator.mjs @@ -0,0 +1,33 @@ +#!/usr/bin/env node + +import { + callSpacetimeProcedureViaCli, + ensureProcedureOk, + parseArgs, +} from './spacetime-migration-common.mjs'; + +try { + const options = parseArgs(process.argv.slice(2)); + if (!options.operatorIdentity) { + throw new Error('必须传入 --operator-identity。'); + } + + const input = { + operator_identity_hex: options.operatorIdentity, + }; + const result = await callSpacetimeProcedureViaCli( + options, + 'revoke_database_migration_operator', + input, + ); + ensureProcedureOk(result); + + console.log( + `[spacetime:migration:operator] 已撤销 ${result.operator_identity_hex ?? options.operatorIdentity}`, + ); +} catch (error) { + console.error( + `[spacetime:migration:operator] ${error instanceof Error ? 
error.message : String(error)}`, + ); + process.exit(1); +} diff --git a/server-rs/Cargo.lock b/server-rs/Cargo.lock index f3f2aa7b..d0b66e69 100644 --- a/server-rs/Cargo.lock +++ b/server-rs/Cargo.lock @@ -2698,6 +2698,7 @@ dependencies = [ "serde_json", "shared-kernel", "spacetimedb", + "spacetimedb-lib", ] [[package]] diff --git a/server-rs/crates/spacetime-module/Cargo.toml b/server-rs/crates/spacetime-module/Cargo.toml index cc1f9adb..62749ac7 100644 --- a/server-rs/crates/spacetime-module/Cargo.toml +++ b/server-rs/crates/spacetime-module/Cargo.toml @@ -11,6 +11,7 @@ crate-type = ["cdylib"] log = { workspace = true } serde = { version = "1", features = ["derive"] } serde_json = "1" +spacetimedb-lib = { version = "=2.1.0", default-features = false, features = ["serde"] } module-ai = { path = "../module-ai", default-features = false, features = ["spacetime-types"] } module-assets = { path = "../module-assets", default-features = false, features = ["spacetime-types"] } module-big-fish = { path = "../module-big-fish", default-features = false, features = ["spacetime-types"] } diff --git a/server-rs/crates/spacetime-module/src/lib.rs b/server-rs/crates/spacetime-module/src/lib.rs index b51ec181..82a3b4ef 100644 --- a/server-rs/crates/spacetime-module/src/lib.rs +++ b/server-rs/crates/spacetime-module/src/lib.rs @@ -20,7 +20,9 @@ use module_quest::{ }; pub(crate) use serde_json::{Map as JsonMap, Value as JsonValue, json}; pub(crate) use shared_kernel::format_timestamp_micros; -pub use spacetimedb::{ProcedureContext, ReducerContext, SpacetimeType, Table, Timestamp}; +pub use spacetimedb::{ + Identity, ProcedureContext, ReducerContext, SpacetimeType, Table, Timestamp, +}; use std::collections::HashSet; mod ai; @@ -29,6 +31,7 @@ mod auth; mod big_fish; mod domain_types; mod entry; +mod migration; mod puzzle; mod runtime; @@ -38,6 +41,7 @@ pub use auth::*; pub use big_fish::*; pub use domain_types::*; pub use entry::*; +pub use migration::*; pub use runtime::*; 
#[spacetimedb::table(accessor = player_progression)] diff --git a/server-rs/crates/spacetime-module/src/migration.rs b/server-rs/crates/spacetime-module/src/migration.rs new file mode 100644 index 00000000..1ff8b286 --- /dev/null +++ b/server-rs/crates/spacetime-module/src/migration.rs @@ -0,0 +1,648 @@ +use crate::*; +use serde::{Deserialize, Serialize}; +use spacetimedb_lib::sats::de::serde::DeserializeWrapper; +use spacetimedb_lib::sats::ser::serde::SerializeWrapper; +use std::collections::HashSet; + +use crate::puzzle::{ + puzzle_agent_message, puzzle_agent_session, puzzle_runtime_run, puzzle_work_profile, +}; + +const MIGRATION_SCHEMA_VERSION: u32 = 1; +const MIGRATION_MAX_TABLE_NAME_LEN: usize = 96; +const MIGRATION_MAX_OPERATOR_NOTE_CHARS: usize = 160; +const MIGRATION_MIN_BOOTSTRAP_SECRET_LEN: usize = 16; +const MIGRATION_BOOTSTRAP_SECRET: Option<&str> = + option_env!("GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET"); + +#[spacetimedb::table(accessor = database_migration_operator)] +pub struct DatabaseMigrationOperator { + #[primary_key] + pub operator_identity: Identity, + pub created_at: Timestamp, + pub created_by: Identity, + pub note: String, +} + +#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] +pub struct DatabaseMigrationExportInput { + pub include_tables: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] +pub struct DatabaseMigrationImportInput { + pub migration_json: String, + pub include_tables: Vec, + pub replace_existing: bool, + pub dry_run: bool, +} + +#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] +pub struct DatabaseMigrationAuthorizeOperatorInput { + pub bootstrap_secret: String, + pub operator_identity_hex: String, + pub note: String, +} + +#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] +pub struct DatabaseMigrationRevokeOperatorInput { + pub operator_identity_hex: String, +} + +#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] +pub struct DatabaseMigrationTableStat { + pub table_name: String, + 
pub exported_row_count: u64, + pub imported_row_count: u64, + pub skipped_row_count: u64, +} + +#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] +pub struct DatabaseMigrationProcedureResult { + pub ok: bool, + pub schema_version: u32, + pub migration_json: Option, + pub table_stats: Vec, + pub error_message: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] +pub struct DatabaseMigrationOperatorProcedureResult { + pub ok: bool, + pub operator_identity_hex: Option, + pub error_message: Option, +} + +#[derive(Serialize, Deserialize)] +struct MigrationFile { + schema_version: u32, + exported_at_micros: i64, + tables: Vec, +} + +#[derive(Serialize, Deserialize)] +struct MigrationTable { + name: String, + rows: Vec, +} + +macro_rules! migration_tables { + ($macro_name:ident $(, $arg:expr)* $(,)?) => { + $macro_name! { + $($arg,)* + auth_store_snapshot, + user_account, + auth_identity, + refresh_session, + ai_task, + ai_task_stage, + ai_text_chunk, + ai_result_reference, + runtime_snapshot, + runtime_setting, + user_browse_history, + profile_dashboard_state, + profile_wallet_ledger, + profile_invite_code, + profile_referral_relation, + profile_played_world, + profile_membership, + profile_recharge_order, + profile_save_archive, + player_progression, + chapter_progression, + npc_state, + story_session, + story_event, + inventory_slot, + battle_state, + treasure_record, + quest_record, + quest_log, + custom_world_profile, + custom_world_session, + custom_world_agent_session, + custom_world_agent_message, + custom_world_agent_operation, + custom_world_draft_card, + custom_world_gallery_entry, + asset_object, + asset_entity_binding, + puzzle_agent_session, + puzzle_agent_message, + puzzle_work_profile, + puzzle_runtime_run, + big_fish_creation_session, + big_fish_agent_message, + big_fish_asset_slot, + big_fish_runtime_run + } + }; +} + +macro_rules! 
collect_all_migration_tables { + ($ctx:expr, $include_tables:expr, $tables:expr) => { + migration_tables!(collect_migration_table, $ctx, $include_tables, $tables); + }; +} + +macro_rules! collect_migration_table { + ($ctx:expr, $include_tables:expr, $tables:expr, $($table:ident),+ $(,)?) => { + $( + if should_include_table($include_tables, stringify!($table)) { + let rows = $ctx + .db + .$table() + .iter() + .map(|row| row_to_json(&row)) + .collect::, _>>()?; + $tables.push(MigrationTable { + name: stringify!($table).to_string(), + rows, + }); + } + )+ + }; +} + +macro_rules! clear_all_migration_tables { + ($ctx:expr, $include_tables:expr) => { + migration_tables!(clear_migration_table, $ctx, $include_tables); + }; +} + +macro_rules! clear_migration_table { + ($ctx:expr, $include_tables:expr, $($table:ident),+ $(,)?) => { + $( + if should_include_table($include_tables, stringify!($table)) { + for row in $ctx.db.$table().iter().collect::>() { + $ctx.db.$table().delete(row); + } + } + )+ + }; +} + +// 迁移权限独立存表,避免把 private 表导出能力开放给任意登录身份。 +#[spacetimedb::procedure] +pub fn authorize_database_migration_operator( + ctx: &mut ProcedureContext, + input: DatabaseMigrationAuthorizeOperatorInput, +) -> DatabaseMigrationOperatorProcedureResult { + match authorize_database_migration_operator_inner(ctx, input) { + Ok(operator_identity_hex) => DatabaseMigrationOperatorProcedureResult { + ok: true, + operator_identity_hex: Some(operator_identity_hex), + error_message: None, + }, + Err(error) => DatabaseMigrationOperatorProcedureResult { + ok: false, + operator_identity_hex: None, + error_message: Some(error), + }, + } +} + +#[spacetimedb::procedure] +pub fn revoke_database_migration_operator( + ctx: &mut ProcedureContext, + input: DatabaseMigrationRevokeOperatorInput, +) -> DatabaseMigrationOperatorProcedureResult { + match revoke_database_migration_operator_inner(ctx, input) { + Ok(operator_identity_hex) => DatabaseMigrationOperatorProcedureResult { + ok: true, + 
operator_identity_hex: Some(operator_identity_hex), + error_message: None, + }, + Err(error) => DatabaseMigrationOperatorProcedureResult { + ok: false, + operator_identity_hex: None, + error_message: Some(error), + }, + } +} + +// 迁移导出走 procedure 返回 JSON 字符串,避免 reducer 无返回值且不能读取 private 表给外部。 +#[spacetimedb::procedure] +pub fn export_database_migration_to_file( + ctx: &mut ProcedureContext, + input: DatabaseMigrationExportInput, +) -> DatabaseMigrationProcedureResult { + match export_database_migration_to_file_inner(ctx, input) { + Ok((migration_json, stats)) => DatabaseMigrationProcedureResult { + ok: true, + schema_version: MIGRATION_SCHEMA_VERSION, + migration_json: Some(migration_json), + table_stats: stats, + error_message: None, + }, + Err(error) => DatabaseMigrationProcedureResult { + ok: false, + schema_version: MIGRATION_SCHEMA_VERSION, + migration_json: None, + table_stats: Vec::new(), + error_message: Some(error), + }, + } +} + +// 迁移导入由 Node 侧读文件后把 JSON 字符串传入,procedure 只负责校验和写表事务。 +#[spacetimedb::procedure] +pub fn import_database_migration_from_file( + ctx: &mut ProcedureContext, + input: DatabaseMigrationImportInput, +) -> DatabaseMigrationProcedureResult { + match import_database_migration_from_file_inner(ctx, input) { + Ok(stats) => DatabaseMigrationProcedureResult { + ok: true, + schema_version: MIGRATION_SCHEMA_VERSION, + migration_json: None, + table_stats: stats, + error_message: None, + }, + Err(error) => DatabaseMigrationProcedureResult { + ok: false, + schema_version: MIGRATION_SCHEMA_VERSION, + migration_json: None, + table_stats: Vec::new(), + error_message: Some(error), + }, + } +} + +fn export_database_migration_to_file_inner( + ctx: &mut ProcedureContext, + input: DatabaseMigrationExportInput, +) -> Result<(String, Vec), String> { + let caller = ctx.sender(); + let included_tables = normalize_include_tables(&input.include_tables)?; + let exported_at_micros = ctx.timestamp.to_micros_since_unix_epoch(); + + let migration_file = 
ctx.try_with_tx(|tx| { + require_migration_operator(tx, caller)?; + build_migration_file(tx, exported_at_micros, included_tables.as_ref()) + })?; + let stats = build_export_stats(&migration_file.tables); + let content = serde_json::to_string_pretty(&migration_file) + .map_err(|error| format!("迁移文件序列化失败: {error}"))?; + + Ok((content, stats)) +} + +fn import_database_migration_from_file_inner( + ctx: &mut ProcedureContext, + input: DatabaseMigrationImportInput, +) -> Result, String> { + let caller = ctx.sender(); + let included_tables = normalize_include_tables(&input.include_tables)?; + if input.migration_json.trim().is_empty() { + return Err("migration_json 不能为空".to_string()); + } + ctx.try_with_tx(|tx| require_migration_operator(tx, caller))?; + + let migration_file = serde_json::from_str::(&input.migration_json) + .map_err(|error| format!("迁移文件 JSON 解析失败: {error}"))?; + if migration_file.schema_version != MIGRATION_SCHEMA_VERSION { + return Err(format!( + "迁移文件 schema_version 不匹配,期望 {},实际 {}", + MIGRATION_SCHEMA_VERSION, migration_file.schema_version + )); + } + + let stats = if input.dry_run { + build_import_dry_run_stats(&migration_file.tables, included_tables.as_ref())? + } else { + ctx.try_with_tx(|tx| { + require_migration_operator(tx, caller)?; + apply_migration_file( + tx, + &migration_file, + included_tables.as_ref(), + input.replace_existing, + ) + })? 
+ }; + + Ok(stats) +} + +fn authorize_database_migration_operator_inner( + ctx: &mut ProcedureContext, + input: DatabaseMigrationAuthorizeOperatorInput, +) -> Result { + let caller = ctx.sender(); + let operator_identity = parse_migration_operator_identity(&input.operator_identity_hex)?; + let note = normalize_migration_operator_note(&input.note)?; + let bootstrap_secret = input.bootstrap_secret.trim().to_string(); + + ctx.try_with_tx(|tx| { + authorize_database_migration_operator_tx( + tx, + caller, + operator_identity, + &bootstrap_secret, + note.clone(), + ) + })?; + + Ok(operator_identity.to_hex().to_string()) +} + +fn revoke_database_migration_operator_inner( + ctx: &mut ProcedureContext, + input: DatabaseMigrationRevokeOperatorInput, +) -> Result { + let caller = ctx.sender(); + let operator_identity = parse_migration_operator_identity(&input.operator_identity_hex)?; + + ctx.try_with_tx(|tx| { + require_migration_operator(tx, caller)?; + if tx + .db + .database_migration_operator() + .operator_identity() + .find(&operator_identity) + .is_none() + { + return Err("迁移操作员不存在".to_string()); + } + tx.db + .database_migration_operator() + .operator_identity() + .delete(&operator_identity); + Ok(()) + })?; + + Ok(operator_identity.to_hex().to_string()) +} + +fn authorize_database_migration_operator_tx( + ctx: &ReducerContext, + caller: Identity, + operator_identity: Identity, + bootstrap_secret: &str, + note: String, +) -> Result<(), String> { + let has_operator = ctx.db.database_migration_operator().iter().next().is_some(); + if has_operator { + require_migration_operator(ctx, caller)?; + } else { + require_migration_bootstrap_secret(bootstrap_secret)?; + } + + if ctx + .db + .database_migration_operator() + .operator_identity() + .find(&operator_identity) + .is_some() + { + ctx.db + .database_migration_operator() + .operator_identity() + .delete(&operator_identity); + } + + ctx.db + .database_migration_operator() + .insert(DatabaseMigrationOperator { + 
operator_identity, + created_at: ctx.timestamp, + created_by: caller, + note, + }); + + Ok(()) +} + +fn require_migration_operator(ctx: &ReducerContext, caller: Identity) -> Result<(), String> { + if ctx + .db + .database_migration_operator() + .operator_identity() + .find(&caller) + .is_some() + { + Ok(()) + } else { + Err("当前 identity 未被授权执行数据库迁移".to_string()) + } +} + +fn require_migration_bootstrap_secret(input: &str) -> Result<(), String> { + let configured_secret = MIGRATION_BOOTSTRAP_SECRET + .map(str::trim) + .filter(|secret| !secret.is_empty()) + .ok_or_else(|| "迁移引导密钥未配置,无法创建首个操作员".to_string())?; + + if configured_secret.chars().count() < MIGRATION_MIN_BOOTSTRAP_SECRET_LEN { + return Err("迁移引导密钥长度不足,至少需要 16 个字符".to_string()); + } + if input != configured_secret { + return Err("迁移引导密钥不正确".to_string()); + } + + Ok(()) +} + +fn parse_migration_operator_identity(input: &str) -> Result { + let identity_hex = input.trim().trim_start_matches("0x"); + if identity_hex.len() != 64 { + return Err("operator_identity_hex 必须是 64 位十六进制 identity".to_string()); + } + + Identity::from_hex(identity_hex) + .map_err(|error| format!("operator_identity_hex 格式不合法: {error}")) +} + +fn normalize_migration_operator_note(input: &str) -> Result { + let note = input.trim(); + if note.chars().count() > MIGRATION_MAX_OPERATOR_NOTE_CHARS { + return Err(format!( + "迁移操作员备注过长,最多 {} 个字符", + MIGRATION_MAX_OPERATOR_NOTE_CHARS + )); + } + + Ok(note.to_string()) +} + +fn normalize_include_tables(input: &[String]) -> Result>, String> { + if input.is_empty() { + return Ok(None); + } + + let mut tables = HashSet::new(); + for raw_name in input { + let name = raw_name.trim(); + if name.is_empty() { + continue; + } + if name.len() > MIGRATION_MAX_TABLE_NAME_LEN { + return Err(format!("迁移表名过长: {name}")); + } + if !is_supported_migration_table(name) { + return Err(format!("迁移表不在白名单内: {name}")); + } + tables.insert(name.to_string()); + } + Ok(Some(tables)) +} + +fn should_include_table(include_tables: 
Option<&HashSet>, table_name: &str) -> bool { + include_tables + .map(|tables| tables.contains(table_name)) + .unwrap_or(true) +} + +fn build_migration_file( + ctx: &ReducerContext, + exported_at_micros: i64, + include_tables: Option<&HashSet>, +) -> Result { + let mut tables = Vec::new(); + collect_all_migration_tables!(ctx, include_tables, tables); + + Ok(MigrationFile { + schema_version: MIGRATION_SCHEMA_VERSION, + exported_at_micros, + tables, + }) +} + +fn build_export_stats(tables: &[MigrationTable]) -> Vec { + tables + .iter() + .map(|table| DatabaseMigrationTableStat { + table_name: table.name.clone(), + exported_row_count: table.rows.len() as u64, + imported_row_count: 0, + skipped_row_count: 0, + }) + .collect() +} + +fn build_import_dry_run_stats( + tables: &[MigrationTable], + include_tables: Option<&HashSet>, +) -> Result, String> { + let mut stats = Vec::new(); + for table in tables { + if !is_supported_migration_table(&table.name) { + return Err(format!("迁移文件包含不支持的表: {}", table.name)); + } + if should_include_table(include_tables, &table.name) { + stats.push(DatabaseMigrationTableStat { + table_name: table.name.clone(), + exported_row_count: 0, + imported_row_count: table.rows.len() as u64, + skipped_row_count: 0, + }); + } else { + stats.push(DatabaseMigrationTableStat { + table_name: table.name.clone(), + exported_row_count: 0, + imported_row_count: 0, + skipped_row_count: table.rows.len() as u64, + }); + } + } + Ok(stats) +} + +fn apply_migration_file( + ctx: &ReducerContext, + migration_file: &MigrationFile, + include_tables: Option<&HashSet>, + replace_existing: bool, +) -> Result, String> { + let mut stats = Vec::new(); + for table in &migration_file.tables { + if !is_supported_migration_table(&table.name) { + return Err(format!("迁移文件包含不支持的表: {}", table.name)); + } + } + + if replace_existing { + clear_all_migration_tables!(ctx, include_tables); + } + + for table in &migration_file.tables { + if !should_include_table(include_tables, 
&table.name) { + stats.push(DatabaseMigrationTableStat { + table_name: table.name.clone(), + exported_row_count: 0, + imported_row_count: 0, + skipped_row_count: table.rows.len() as u64, + }); + continue; + } + + let imported_row_count = insert_migration_table_rows(ctx, table)?; + stats.push(DatabaseMigrationTableStat { + table_name: table.name.clone(), + exported_row_count: 0, + imported_row_count, + skipped_row_count: 0, + }); + } + + Ok(stats) +} + +fn row_to_json(row: &T) -> Result { + serde_json::to_value(SerializeWrapper::from_ref(row)) + .map_err(|error| format!("迁移行序列化失败: {error}")) +} + +fn row_from_json(value: &serde_json::Value) -> Result +where + T: for<'de> spacetimedb::Deserialize<'de>, +{ + let wrapped: DeserializeWrapper = serde_json::from_value(value.clone()) + .map_err(|error| format!("迁移行反序列化失败: {error}"))?; + Ok(wrapped.0) +} + +fn insert_migration_table_rows( + ctx: &ReducerContext, + table: &MigrationTable, +) -> Result { + macro_rules! insert_table_match_arm { + ($($table:ident),+ $(,)?) => { + match table.name.as_str() { + $( + stringify!($table) => { + let mut imported = 0u64; + for value in &table.rows { + let row = row_from_json(value) + .map_err(|error| format!("{}: {error}", stringify!($table)))?; + ctx.db + .$table() + .try_insert(row) + .map_err(|error| format!("{} 导入失败: {error}", stringify!($table)))?; + imported = imported.saturating_add(1); + } + Ok(imported) + } + )+ + _ => Err(format!("迁移表不在白名单内: {}", table.name)), + } + }; + } + + migration_tables!(insert_table_match_arm) +} + +fn is_supported_migration_table(table_name: &str) -> bool { + macro_rules! supported_table_match { + ($($table:ident),+ $(,)?) 
=> { + matches!( + table_name, + $(stringify!($table))|+ + ) + }; + } + + migration_tables!(supported_table_match) +} From 3178c26095bf9d005dadec549614fe930927a2a1 Mon Sep 17 00:00:00 2001 From: kdletters Date: Mon, 27 Apr 2026 16:06:54 +0800 Subject: [PATCH 2/8] fix login entry fallback --- .../AUTH_LOGIN_OPTIONS_DESIGN_2026-04-21.md | 18 +++++--- server-rs/crates/api-server/src/app.rs | 28 +++++++++++++ server-rs/crates/shared-contracts/src/auth.rs | 9 +++- src/components/auth/AuthGate.test.tsx | 42 +++++++++++++++++++ src/components/auth/AuthGate.tsx | 28 +++++++++++-- 5 files changed, 114 insertions(+), 11 deletions(-) diff --git a/docs/technical/AUTH_LOGIN_OPTIONS_DESIGN_2026-04-21.md b/docs/technical/AUTH_LOGIN_OPTIONS_DESIGN_2026-04-21.md index 712c3f4f..a3b56830 100644 --- a/docs/technical/AUTH_LOGIN_OPTIONS_DESIGN_2026-04-21.md +++ b/docs/technical/AUTH_LOGIN_OPTIONS_DESIGN_2026-04-21.md @@ -11,6 +11,7 @@ 当前阶段只解决一件事: 1. 由 `Axum` 根据服务端配置,返回当前环境启用的登录方式列表。 +2. 密码登录入口由 Rust `password_entry` 固定承载,作为登录弹窗的保底入口。 本阶段明确不包含: @@ -31,7 +32,7 @@ ```json { - "availableLoginMethods": ["phone", "wechat"] + "availableLoginMethods": ["phone", "password", "wechat"] } ``` @@ -40,6 +41,7 @@ 1. `availableLoginMethods` 为字符串数组 2. 当前阶段只允许出现: - `phone` + - `password` - `wechat` ### 3.3 返回顺序 @@ -47,7 +49,8 @@ 返回顺序固定为: 1. 先 `phone` -2. 再 `wechat` +2. 再 `password` +3. 再 `wechat` 这样可以保证前端按钮顺序稳定,不因配置解析顺序变化而漂移。 @@ -61,8 +64,9 @@ 映射规则固定为: 1. `SMS_AUTH_ENABLED=true` 时返回 `phone` -2. `WECHAT_AUTH_ENABLED=true` 时返回 `wechat` -3. 两者都关闭时返回空数组 +2. Rust 密码登录主链可用时固定返回 `password` +3. `WECHAT_AUTH_ENABLED=true` 时返回 `wechat` +4. 短信与微信都关闭时仍返回 `["password"]` ## 5. crate 边界 @@ -84,13 +88,15 @@ 1. 根据 `availableLoginMethods` 决定是否展示手机号 / 微信入口 2. 不再假设某种登录方式一定存在 +3. 若 `/api/auth/login-options` 联调失败或返回空数组,前端仍保留 `password` 入口,避免登录弹窗显示“当前登录入口暂不可用”后无法继续操作。 ## 6. 测试要求 至少覆盖: -1. 默认配置下返回空数组 -2. 同时启用短信与微信时返回 `["phone", "wechat"]` +1. 默认配置下返回 `["password"]` +2. 同时启用短信、密码与微信时返回 `["phone", "password", "wechat"]` +3. 
前端在 `login-options` 读取失败或返回空数组时,仍展示密码登录表单 ## 7. 完成定义 diff --git a/server-rs/crates/api-server/src/app.rs b/server-rs/crates/api-server/src/app.rs index 73e091b1..9446bcd4 100644 --- a/server-rs/crates/api-server/src/app.rs +++ b/server-rs/crates/api-server/src/app.rs @@ -1437,6 +1437,34 @@ mod tests { ); } + #[tokio::test] + async fn auth_login_options_keeps_password_entry_when_external_methods_disabled() { + let app = build_router(AppState::new(AppConfig::default()).expect("state should build")); + let response = app + .oneshot( + Request::builder() + .uri("/api/auth/login-options") + .body(Body::empty()) + .expect("request should build"), + ) + .await + .expect("request should succeed"); + + assert_eq!(response.status(), StatusCode::OK); + let body = response + .into_body() + .collect() + .await + .expect("body should collect") + .to_bytes(); + let payload: Value = serde_json::from_slice(&body).expect("body should be valid json"); + + assert_eq!( + payload["availableLoginMethods"], + serde_json::json!(["password"]) + ); + } + #[tokio::test] async fn send_phone_code_returns_mock_cooldown_and_expire_seconds() { let config = AppConfig { diff --git a/server-rs/crates/shared-contracts/src/auth.rs b/server-rs/crates/shared-contracts/src/auth.rs index faa7ba03..9821054b 100644 --- a/server-rs/crates/shared-contracts/src/auth.rs +++ b/server-rs/crates/shared-contracts/src/auth.rs @@ -215,7 +215,7 @@ mod tests { use serde_json::json; #[test] - fn available_login_methods_keep_phone_then_wechat_order() { + fn available_login_methods_keep_phone_password_wechat_order() { let methods = build_available_login_methods(true, true, true); assert_eq!( @@ -228,6 +228,13 @@ mod tests { ); } + #[test] + fn available_login_methods_keep_password_as_default_entry() { + let methods = build_available_login_methods(false, true, false); + + assert_eq!(methods, vec![AUTH_LOGIN_METHOD_PASSWORD.to_string()]); + } + #[test] fn password_entry_request_uses_camel_case_fields() { let payload = 
serde_json::to_value(PasswordEntryRequest { diff --git a/src/components/auth/AuthGate.test.tsx b/src/components/auth/AuthGate.test.tsx index 24d4aa7b..ddbcca76 100644 --- a/src/components/auth/AuthGate.test.tsx +++ b/src/components/auth/AuthGate.test.tsx @@ -216,6 +216,48 @@ test('auth gate does not auto-create a guest account when dev guest switch is no expect(await screen.findByText('应用内容')).toBeTruthy(); }); +test('auth gate keeps password entry available when login options are empty', async () => { + const user = userEvent.setup(); + + authMocks.getCurrentAuthUser.mockResolvedValue({ + user: null, + availableLoginMethods: [], + }); + authMocks.getAuthLoginOptions.mockResolvedValue({ + availableLoginMethods: [], + }); + + render( + + + , + ); + + await user.click(await screen.findByRole('button', { name: '进入作品' })); + + const dialog = screen.getByRole('dialog', { name: '账号入口' }); + expect(within(dialog).getByLabelText('密码')).toBeTruthy(); + expect(within(dialog).queryByText('当前登录入口暂不可用。')).toBeNull(); +}); + +test('auth gate falls back to password entry when login options request fails', async () => { + const user = userEvent.setup(); + + authMocks.getAuthLoginOptions.mockRejectedValue(new Error('读取登录方式失败')); + + render( + + + , + ); + + await user.click(await screen.findByRole('button', { name: '进入作品' })); + + const dialog = screen.getByRole('dialog', { name: '账号入口' }); + expect(within(dialog).getByLabelText('密码')).toBeTruthy(); + expect(within(dialog).queryByText('当前登录入口暂不可用。')).toBeNull(); +}); + test('auth gate opens a login modal for protected actions and resumes after login', async () => { const user = userEvent.setup(); const onAuthenticated = vi.fn(); diff --git a/src/components/auth/AuthGate.tsx b/src/components/auth/AuthGate.tsx index 0e01ba41..4263167b 100644 --- a/src/components/auth/AuthGate.tsx +++ b/src/components/auth/AuthGate.tsx @@ -57,6 +57,20 @@ type AuthStatus = | 'ready' | 'error'; +const FALLBACK_LOGIN_METHODS: AuthLoginMethod[] = 
['password']; + +function normalizeAvailableLoginMethods( + methods: AuthLoginMethod[] | null | undefined, +): AuthLoginMethod[] { + const normalizedMethods = Array.from(new Set(methods ?? [])); + + // 密码登录由 Rust auth entry 固定承载,不依赖短信或微信环境开关。 + // 当 login-options 联调失败或配置返回空数组时,仍要保留账号入口,避免登录弹窗失去可操作方式。 + return normalizedMethods.length > 0 + ? normalizedMethods + : FALLBACK_LOGIN_METHODS; +} + export function AuthGate({ children }: AuthGateProps) { const [status, setStatus] = useState('checking'); const [user, setUser] = useState(null); @@ -202,7 +216,9 @@ export function AuthGate({ children }: AuthGateProps) { return null; } - setAvailableLoginMethods(options.availableLoginMethods); + setAvailableLoginMethods( + normalizeAvailableLoginMethods(options.availableLoginMethods), + ); return options; }; @@ -220,7 +236,7 @@ export function AuthGate({ children }: AuthGateProps) { return; } - setAvailableLoginMethods([]); + setAvailableLoginMethods(FALLBACK_LOGIN_METHODS); setUser(null); setError( optionsError instanceof Error @@ -245,13 +261,17 @@ export function AuthGate({ children }: AuthGateProps) { } if (!nextSession.user) { - setAvailableLoginMethods(nextSession.availableLoginMethods); + setAvailableLoginMethods( + normalizeAvailableLoginMethods(nextSession.availableLoginMethods), + ); await resolveGuestFallback(); return; } setUser(nextSession.user); - setAvailableLoginMethods(nextSession.availableLoginMethods); + setAvailableLoginMethods( + normalizeAvailableLoginMethods(nextSession.availableLoginMethods), + ); setStatus( nextSession.user.bindingStatus === 'pending_bind_phone' ? 
'pending_bind_phone' From 9aae7afb2e5e540af45d97773c4a5d2ce436e333 Mon Sep 17 00:00:00 2001 From: kdletters Date: Mon, 27 Apr 2026 16:46:01 +0800 Subject: [PATCH 3/8] fix: import migration via web api body --- ...N_STRING_MIGRATION_PROCEDURE_2026-04-27.md | 20 +++-- scripts/spacetime-import-migration-json.mjs | 84 ++++++++++++++++--- scripts/spacetime-migration-common.mjs | 38 ++++++++- 3 files changed, 124 insertions(+), 18 deletions(-) diff --git a/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md index 51b56153..6f1274a4 100644 --- a/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md +++ b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md @@ -8,12 +8,12 @@ SpacetimeDB reducer 必须保持确定性,不能访问文件系统和网络。 1. `spacetime-module` 内的导出 procedure 读取迁移白名单表,并直接返回迁移 JSON 字符串。 2. Node 运维脚本默认通过 `spacetime call` 调用导出 procedure,把返回的 JSON 字符串写入本地文件。 -3. Node 运维脚本读取本地 JSON 文件内容,并通过 HTTP request body 作为字符串参数传给导入 procedure。 +3. Node 运维脚本读取本地 JSON 文件内容。导入时默认先通过 `POST /v1/identity` 创建临时 Web API identity/token,再用当前 CLI 登录态把该 identity 授权为迁移操作员,最后通过 HTTP request body 把 JSON 字符串传给导入 procedure。 4. 
导入 procedure 校验 JSON 与表白名单后,在事务中写入目标数据库。
 
 procedure 不再访问 HTTP 文件桥,也不接收部署机本地文件路径。这样可以避开 SpacetimeDB 对 private/special-purpose 地址的 HTTP 访问限制,并避免把 private 表内容通过临时 HTTP 服务转发。
 
-`spacetime login show --token` 输出的是 CLI 登录 token,不是 HTTP `/v1/database/.../call` 所需的数据库连接 token。运维脚本默认走 CLI 登录态,迁移时不要把 CLI token 传给 `--token`;只有显式传 `--use-http` 时才需要数据库连接 token。
+`spacetime login show --token` 输出的是 CLI 登录 token,不是 HTTP `/v1/database/.../call` 所需的数据库连接 token。导入脚本如果没有显式传 `--token`,会自动调用 `POST /v1/identity` 获取 Web API token;迁移时不要把 CLI token 传给 `--token`。
 
 ## 接口
 
@@ -145,16 +145,26 @@ node scripts/spacetime-authorize-migration-operator.mjs \
   --note "server import"
 ```
 
-导入脚本负责读取服务器本地文件并把 JSON 字符串传入目标库 procedure:
+导入脚本负责读取服务器本地文件并把 JSON 字符串通过 Web API request body 传入目标库 procedure。因为 JSON 不再放进 `spacetime call` 命令行参数,所以不会触发 Linux `spawn E2BIG`:
 
 ```bash
 node scripts/spacetime-import-migration-json.mjs \
   --server maincloud \
   --database xushi-p4wfr \
+  --bootstrap-secret <服务器目标库发布时输出的随机密钥> \
   --in tmp/spacetime-migrations/source-2026-04-27.json \
   --replace-existing
 ```
 
+默认情况下,脚本会自动完成四步:
+
+1. `POST /v1/identity` 创建临时 Web API identity/token。
+2. 使用当前机器 `spacetime` CLI 登录态调用 `authorize_database_migration_operator`,授权这个临时 identity。
+3. 使用 `Authorization: Bearer <临时 token>` 调用 `import_database_migration_from_file`,把完整迁移 JSON 放在 HTTP body 中。
+4. 
导入请求结束后,脚本会用同一个临时 Web API token 调用 `revoke_database_migration_operator`,撤销该临时 identity。 + +如果你已经有可用的数据库连接 token,也可以显式传 `--token `。这种情况下脚本不会自动授权;该 token 对应的 identity 必须已经是迁移操作员。 + 正式导入前建议先加 `--dry-run`,确认 JSON 可解析、版本匹配、表名都在迁移白名单内。 如需分批迁移,可用逗号分隔表名: @@ -166,7 +176,7 @@ node scripts/spacetime-export-migration-json.mjs \ --include ai_task,ai_task_stage,ai_text_chunk,ai_result_reference ``` -`--server` 支持 `dev`、`local`、`maincloud`,也可以直接传 SpacetimeDB 服务器 URL。脚本默认走 `spacetime call`,使用当前机器的 CLI 登录态。数据库名可通过 `--database`、`GENARRATIVE_SPACETIME_MAINCLOUD_DATABASE` 或 `GENARRATIVE_SPACETIME_DATABASE` 提供。 +`--server` 支持 `dev`、`local`、`maincloud`,也可以直接传 SpacetimeDB 服务器 URL。导出、授权、撤销默认走 `spacetime call`,使用当前机器的 CLI 登录态;导入默认走 Web API request body,避免大 JSON 触发命令行长度限制。数据库名可通过 `--database`、`GENARRATIVE_SPACETIME_MAINCLOUD_DATABASE` 或 `GENARRATIVE_SPACETIME_DATABASE` 提供。 授权脚本额外支持: @@ -193,4 +203,4 @@ node scripts/spacetime-export-migration-json.mjs \ 迁移 JSON 作为 procedure 返回值和 HTTP request body 传递,会受 SpacetimeDB 调用响应体、请求体以及中间代理大小限制。数据量较大时,先按 `include_tables` 分批迁移;若单表本身过大,再补充分片 procedure,而不是恢复 HTTP 文件桥。 -`spacetime call` 在 PowerShell 中手写 JSON 容易被剥掉双引号。推荐使用仓库里的 Node 脚本,由脚本直接走 HTTP API,避免 shell 二次处理和命令行长度限制。 +`spacetime call` 在 PowerShell 中手写 JSON 容易被剥掉双引号。导入大文件时也不能把完整 JSON 放进命令行参数,否则 Linux 会在启动子进程时返回 `spawn E2BIG`。推荐使用仓库里的 Node 脚本,由脚本直接走 Web API request body,避免 shell 二次处理和命令行长度限制。 diff --git a/scripts/spacetime-import-migration-json.mjs b/scripts/spacetime-import-migration-json.mjs index 2b3b25e7..1f47fd74 100644 --- a/scripts/spacetime-import-migration-json.mjs +++ b/scripts/spacetime-import-migration-json.mjs @@ -4,7 +4,9 @@ import { readFile } from 'node:fs/promises'; import path from 'node:path'; import { assertReadableFile, - callSpacetimeProcedureAuto, + callSpacetimeProcedure, + callSpacetimeProcedureViaCli, + createSpacetimeWebIdentity, ensureProcedureOk, parseArgs, } from './spacetime-migration-common.mjs'; @@ -22,17 +24,13 @@ try { throw new Error(`迁移文件为空: ${inPath}`); } - const 
input = { - migration_json: migrationJson, - include_tables: options.includeTables, - replace_existing: options.replaceExisting === true, - dry_run: options.dryRun === true, - }; - const result = await callSpacetimeProcedureAuto( - options, - 'import_database_migration_from_file', - input, - ); + const webOptions = await prepareWebImportOptions(options); + let result; + try { + result = await importMigrationJsonDirect(webOptions, migrationJson); + } finally { + await revokeTemporaryWebIdentity(webOptions); + } ensureProcedureOk(result); console.log( @@ -46,6 +44,68 @@ try { process.exit(1); } +async function prepareWebImportOptions(options) { + if (options.token) { + return { ...options, useHttp: true }; + } + + const identity = await createSpacetimeWebIdentity(options); + console.log( + `[spacetime:migration:import] 已通过 Web API 创建临时 identity: ${identity.identity}`, + ); + + const authorizeResult = await callSpacetimeProcedureViaCli( + options, + 'authorize_database_migration_operator', + { + bootstrap_secret: options.bootstrapSecret || '', + operator_identity_hex: identity.identity, + note: options.note || 'temporary web api migration import', + }, + ); + ensureProcedureOk(authorizeResult); + console.log(`[spacetime:migration:import] 已授权临时 Web API identity`); + + return { + ...options, + token: identity.token, + temporaryWebIdentity: identity.identity, + useHttp: true, + }; +} + +async function importMigrationJsonDirect(options, migrationJson) { + const input = { + migration_json: migrationJson, + include_tables: options.includeTables, + replace_existing: options.replaceExisting === true, + dry_run: options.dryRun === true, + }; + return callSpacetimeProcedure(options, 'import_database_migration_from_file', input); +} + +async function revokeTemporaryWebIdentity(options) { + if (!options.temporaryWebIdentity) { + return; + } + + try { + const revokeResult = await callSpacetimeProcedure( + options, + 'revoke_database_migration_operator', + { operator_identity_hex: 
options.temporaryWebIdentity }, + ); + ensureProcedureOk(revokeResult); + console.log(`[spacetime:migration:import] 已撤销临时 Web API identity`); + } catch (error) { + console.warn( + `[spacetime:migration:import] 撤销临时 Web API identity 失败: ${ + error instanceof Error ? error.message : String(error) + }`, + ); + } +} + function printTableStats(tableStats) { if (!Array.isArray(tableStats) || tableStats.length === 0) { return; diff --git a/scripts/spacetime-migration-common.mjs b/scripts/spacetime-migration-common.mjs index 44f4bb78..b65550c8 100644 --- a/scripts/spacetime-migration-common.mjs +++ b/scripts/spacetime-migration-common.mjs @@ -133,6 +133,42 @@ export async function callSpacetimeProcedure(options, procedureName, input) { return parseProcedureResult(text); } +export async function createSpacetimeWebIdentity(options) { + const serverUrl = resolveServerUrl(options).replace(/\/+$/u, ''); + const url = `${serverUrl}/v1/identity`; + let response; + try { + response = await fetch(url, { method: 'POST' }); + } catch (error) { + throw new Error( + `SpacetimeDB identity 请求失败: ${url}; ${error instanceof Error ? error.message : String(error)}`, + ); + } + + const text = await response.text(); + if (!response.ok) { + throw new Error(`SpacetimeDB identity HTTP ${response.status}: ${trimPreview(text)}`); + } + + let payload; + try { + payload = JSON.parse(text); + } catch (error) { + throw new Error( + `SpacetimeDB identity 响应不是合法 JSON: ${error instanceof Error ? error.message : String(error)}`, + ); + } + + const identity = + payload.identity ?? payload.Identity ?? payload.identity_hex ?? payload.identityHex; + const token = payload.token ?? 
payload.Token; + if (typeof identity !== 'string' || typeof token !== 'string') { + throw new Error(`SpacetimeDB identity 响应缺少 identity/token: ${trimPreview(text)}`); + } + + return { identity, token }; +} + export async function callSpacetimeProcedureAuto(options, procedureName, input) { if (options.useHttp) { return callSpacetimeProcedure(options, procedureName, input); @@ -266,7 +302,7 @@ function normalizeTableStats(value) { }); } -function resolveServerUrl(options) { +export function resolveServerUrl(options) { if (options.serverUrl) { return options.serverUrl; } From 1e4a64f54202c7a43756f4671c07cd6d40ae6133 Mon Sep 17 00:00:00 2001 From: kdletters Date: Mon, 27 Apr 2026 16:54:38 +0800 Subject: [PATCH 4/8] fix: set json content type for spacetime web api --- ...MEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md | 2 ++ scripts/spacetime-migration-common.mjs | 11 ++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md index 6f1274a4..9dd88b5b 100644 --- a/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md +++ b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md @@ -163,6 +163,8 @@ node scripts/spacetime-import-migration-json.mjs \ 3. 使用 `Authorization: Bearer <临时 token>` 调用 `import_database_migration_from_file`,把完整迁移 JSON 放在 HTTP body 中。 4. 
导入请求结束后,脚本会用同一个临时 Web API token 调用 `revoke_database_migration_operator`,撤销该临时 identity。 +所有直接访问 SpacetimeDB Web API 的 POST 请求必须显式发送 `Content-Type: application/json`。部分 SpacetimeDB 版本不会接受省略 content type 或附带非预期 media type 的请求,即使 body 本身是合法 JSON,也会返回 `HTTP 415`。 + 如果你已经有可用的数据库连接 token,也可以显式传 `--token `。这种情况下脚本不会自动授权;该 token 对应的 identity 必须已经是迁移操作员。 正式导入前建议先加 `--dry-run`,确认 JSON 可解析、版本匹配、表名都在迁移白名单内。 diff --git a/scripts/spacetime-migration-common.mjs b/scripts/spacetime-migration-common.mjs index b65550c8..91e93e45 100644 --- a/scripts/spacetime-migration-common.mjs +++ b/scripts/spacetime-migration-common.mjs @@ -105,10 +105,11 @@ export async function callSpacetimeProcedure(options, procedureName, input) { const serverUrl = resolveServerUrl(options).replace(/\/+$/u, ''); const url = `${serverUrl}/v1/database/${encodeURIComponent(options.database)}/call/${encodeURIComponent(procedureName)}`; const headers = { - 'content-type': 'application/json; charset=utf-8', + Accept: 'application/json', + 'Content-Type': 'application/json', }; if (options.token) { - headers.authorization = `Bearer ${options.token}`; + headers.Authorization = `Bearer ${options.token}`; } let response; @@ -136,9 +137,13 @@ export async function callSpacetimeProcedure(options, procedureName, input) { export async function createSpacetimeWebIdentity(options) { const serverUrl = resolveServerUrl(options).replace(/\/+$/u, ''); const url = `${serverUrl}/v1/identity`; + const headers = { + Accept: 'application/json', + 'Content-Type': 'application/json', + }; let response; try { - response = await fetch(url, { method: 'POST' }); + response = await fetch(url, { method: 'POST', headers }); } catch (error) { throw new Error( `SpacetimeDB identity 请求失败: ${url}; ${error instanceof Error ? 
error.message : String(error)}`, From e9a6cd38f94793f82d44f6b805b85ba78dad7c86 Mon Sep 17 00:00:00 2001 From: kdletters Date: Mon, 27 Apr 2026 17:15:45 +0800 Subject: [PATCH 5/8] feat: add incremental spacetime migration import --- ...N_STRING_MIGRATION_PROCEDURE_2026-04-27.md | 30 ++++++- scripts/spacetime-import-migration-json.mjs | 63 +++++++++++++- scripts/spacetime-migration-common.mjs | 2 + .../crates/spacetime-module/src/migration.rs | 86 ++++++++++++++++--- 4 files changed, 166 insertions(+), 15 deletions(-) diff --git a/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md index 9dd88b5b..33e53949 100644 --- a/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md +++ b/docs/technical/SPACETIMEDB_JSON_STRING_MIGRATION_PROCEDURE_2026-04-27.md @@ -95,13 +95,21 @@ node scripts/spacetime-revoke-migration-operator.mjs \ `import_database_migration_from_file(ctx, input)` +`import_database_migration_incremental_from_file(ctx, input)` + 输入字段: - `migration_json`: 导出 procedure 生成的完整迁移 JSON 字符串。 - `include_tables`: 可选表名白名单。为空时导入文件内所有支持表。 -- `replace_existing`: 是否先清空目标表。跨服务器全量迁移必须为 `true`。 +- `replace_existing`: 是否先清空本次迁移文件内实际导入的目标表。不会清空迁移文件未包含的表;分批迁移时只覆盖当前批次。 - `dry_run`: 只解析和统计,不写表。 +导入模式: + +- 默认严格追加:不清空目标表,逐行插入;遇到主键或唯一约束冲突时失败并回滚,适合确认目标库没有同表旧数据时使用。 +- 增量追加:调用 `import_database_migration_incremental_from_file`,不清空目标表;遇到已存在或唯一约束冲突的行会跳过并计入 `skipped_row_count`,只插入目标库缺失的行。该模式不会更新目标库已有行。 +- 覆盖导入:`replace_existing = true` 时先删除覆盖范围内的目标表旧数据,再插入迁移文件中的数据;只适合迁移文件是这些表完整快照的场景。 + 返回字段: - `ok`: 是否成功。 @@ -152,10 +160,22 @@ node scripts/spacetime-import-migration-json.mjs \ --server maincloud \ --database xushi-p4wfr \ --bootstrap-secret <服务器目标库发布时输出的随机密钥> \ - --in tmp/spacetime-migrations/source-2026-04-27.json \ - --replace-existing + --in tmp/spacetime-migrations/source-2026-04-27.json ``` +如果目标库已有部分数据,且只想补充缺失行,使用增量模式: + +```bash +node 
scripts/spacetime-import-migration-json.mjs \ + --server maincloud \ + --database xushi-p4wfr \ + --bootstrap-secret <服务器目标库发布时输出的随机密钥> \ + --in tmp/spacetime-migrations/source-2026-04-27.json \ + --incremental +``` + +如果目标库对应表已有数据,并且本次文件应作为这些表的覆盖来源,再显式追加 `--replace-existing`。脚本会把覆盖范围限定为迁移文件内实际包含且本次会导入的表,避免分批导入时清空文件外的其它表。 + 默认情况下,脚本会自动完成三步: 1. `POST /v1/identity` 创建临时 Web API identity/token。 @@ -169,6 +189,10 @@ node scripts/spacetime-import-migration-json.mjs \ 正式导入前建议先加 `--dry-run`,确认 JSON 可解析、版本匹配、表名都在迁移白名单内。 +`--dry-run` 不会模拟目标库主键或唯一约束冲突,因此增量模式的 `skipped_row_count` 只有真实导入时才准确。 + +不要在只想追加数据时使用 `--replace-existing`。该参数会先删除覆盖范围内的目标表旧数据,再插入迁移文件中的数据;如果源文件不是完整快照,会造成目标表数据丢失。 + 如需分批迁移,可用逗号分隔表名: ```bash diff --git a/scripts/spacetime-import-migration-json.mjs b/scripts/spacetime-import-migration-json.mjs index 1f47fd74..ba869e3a 100644 --- a/scripts/spacetime-import-migration-json.mjs +++ b/scripts/spacetime-import-migration-json.mjs @@ -16,6 +16,9 @@ try { if (!options.in) { throw new Error('必须传入 --in。'); } + if (options.incremental === true && options.replaceExisting === true) { + throw new Error('--incremental 不能和 --replace-existing 同时使用。'); + } const inPath = path.resolve(options.in); await assertReadableFile(inPath); @@ -75,13 +78,69 @@ async function prepareWebImportOptions(options) { } async function importMigrationJsonDirect(options, migrationJson) { + const includeTables = resolveImportIncludeTables(options, migrationJson); + const procedureName = + options.incremental === true + ? 
'import_database_migration_incremental_from_file' + : 'import_database_migration_from_file'; const input = { migration_json: migrationJson, - include_tables: options.includeTables, + include_tables: includeTables, replace_existing: options.replaceExisting === true, dry_run: options.dryRun === true, }; - return callSpacetimeProcedure(options, 'import_database_migration_from_file', input); + if (options.replaceExisting === true) { + console.log( + `[spacetime:migration:import] replace-existing 仅覆盖本次文件内的表: ${includeTables.join(', ') || '无'}`, + ); + } else if (options.incremental === true) { + console.log(`[spacetime:migration:import] 使用增量模式,已存在或冲突的行会跳过`); + } + return callSpacetimeProcedure(options, procedureName, input); +} + +function resolveImportIncludeTables(options, migrationJson) { + if (options.replaceExisting !== true) { + return options.includeTables; + } + + const migrationTables = readMigrationTableNames(migrationJson); + if (options.includeTables.length === 0) { + return migrationTables; + } + + const requestedTables = new Set(options.includeTables); + return migrationTables.filter((tableName) => requestedTables.has(tableName)); +} + +function readMigrationTableNames(migrationJson) { + let payload; + try { + payload = JSON.parse(migrationJson); + } catch (error) { + throw new Error( + `迁移文件 JSON 解析失败: ${error instanceof Error ? 
error.message : String(error)}`, + ); + } + + if (!payload || !Array.isArray(payload.tables)) { + throw new Error('迁移文件缺少 tables 数组。'); + } + + const tableNames = []; + const seen = new Set(); + for (const table of payload.tables) { + if (!table || typeof table.name !== 'string' || !table.name.trim()) { + throw new Error('迁移文件 tables 内存在缺少 name 的表项。'); + } + const tableName = table.name.trim(); + if (!seen.has(tableName)) { + tableNames.push(tableName); + seen.add(tableName); + } + } + + return tableNames; } async function revokeTemporaryWebIdentity(options) { diff --git a/scripts/spacetime-migration-common.mjs b/scripts/spacetime-migration-common.mjs index 91e93e45..1b1261d4 100644 --- a/scripts/spacetime-migration-common.mjs +++ b/scripts/spacetime-migration-common.mjs @@ -67,6 +67,8 @@ export function parseArgs(argv) { .filter(Boolean); } else if (arg === '--replace-existing') { options.replaceExisting = true; + } else if (arg === '--incremental') { + options.incremental = true; } else if (arg === '--dry-run') { options.dryRun = true; } else if (arg === '--anonymous' || arg === '--no-config') { diff --git a/server-rs/crates/spacetime-module/src/migration.rs b/server-rs/crates/spacetime-module/src/migration.rs index 1ff8b286..3a128daf 100644 --- a/server-rs/crates/spacetime-module/src/migration.rs +++ b/server-rs/crates/spacetime-module/src/migration.rs @@ -37,6 +37,12 @@ pub struct DatabaseMigrationImportInput { pub dry_run: bool, } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum DatabaseMigrationImportMode { + Strict, + Incremental, +} + #[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)] pub struct DatabaseMigrationAuthorizeOperatorInput { pub bootstrap_secret: String, @@ -252,7 +258,36 @@ pub fn import_database_migration_from_file( ctx: &mut ProcedureContext, input: DatabaseMigrationImportInput, ) -> DatabaseMigrationProcedureResult { - match import_database_migration_from_file_inner(ctx, input) { + match import_database_migration_from_file_inner(ctx, 
input, DatabaseMigrationImportMode::Strict) + { + Ok(stats) => DatabaseMigrationProcedureResult { + ok: true, + schema_version: MIGRATION_SCHEMA_VERSION, + migration_json: None, + table_stats: stats, + error_message: None, + }, + Err(error) => DatabaseMigrationProcedureResult { + ok: false, + schema_version: MIGRATION_SCHEMA_VERSION, + migration_json: None, + table_stats: Vec::new(), + error_message: Some(error), + }, + } +} + +// 增量导入只插入目标库缺失的行;主键或唯一约束冲突的行会跳过,不更新已有数据。 +#[spacetimedb::procedure] +pub fn import_database_migration_incremental_from_file( + ctx: &mut ProcedureContext, + input: DatabaseMigrationImportInput, +) -> DatabaseMigrationProcedureResult { + match import_database_migration_from_file_inner( + ctx, + input, + DatabaseMigrationImportMode::Incremental, + ) { Ok(stats) => DatabaseMigrationProcedureResult { ok: true, schema_version: MIGRATION_SCHEMA_VERSION, @@ -292,9 +327,13 @@ fn export_database_migration_to_file_inner( fn import_database_migration_from_file_inner( ctx: &mut ProcedureContext, input: DatabaseMigrationImportInput, + import_mode: DatabaseMigrationImportMode, ) -> Result, String> { let caller = ctx.sender(); let included_tables = normalize_include_tables(&input.include_tables)?; + if import_mode == DatabaseMigrationImportMode::Incremental && input.replace_existing { + return Err("增量导入不能同时启用 replace_existing".to_string()); + } if input.migration_json.trim().is_empty() { return Err("migration_json 不能为空".to_string()); } @@ -319,6 +358,7 @@ fn import_database_migration_from_file_inner( &migration_file, included_tables.as_ref(), input.replace_existing, + import_mode, ) })? 
}; @@ -555,6 +595,7 @@ fn apply_migration_file( migration_file: &MigrationFile, include_tables: Option<&HashSet>, replace_existing: bool, + import_mode: DatabaseMigrationImportMode, ) -> Result, String> { let mut stats = Vec::new(); for table in &migration_file.tables { @@ -563,8 +604,10 @@ fn apply_migration_file( } } + let import_table_names = build_import_table_name_set(migration_file, include_tables); if replace_existing { - clear_all_migration_tables!(ctx, include_tables); + // replace_existing 只覆盖本次迁移文件实际会导入的表,避免分批导入时误清空其它迁移白名单表。 + clear_all_migration_tables!(ctx, Some(&import_table_names)); } for table in &migration_file.tables { @@ -578,18 +621,31 @@ fn apply_migration_file( continue; } - let imported_row_count = insert_migration_table_rows(ctx, table)?; + let (imported_row_count, skipped_row_count) = + insert_migration_table_rows(ctx, table, import_mode)?; stats.push(DatabaseMigrationTableStat { table_name: table.name.clone(), exported_row_count: 0, imported_row_count, - skipped_row_count: 0, + skipped_row_count, }); } Ok(stats) } +fn build_import_table_name_set( + migration_file: &MigrationFile, + include_tables: Option<&HashSet>, +) -> HashSet { + migration_file + .tables + .iter() + .filter(|table| should_include_table(include_tables, &table.name)) + .map(|table| table.name.clone()) + .collect() +} + fn row_to_json(row: &T) -> Result { serde_json::to_value(SerializeWrapper::from_ref(row)) .map_err(|error| format!("迁移行序列化失败: {error}")) @@ -607,23 +663,33 @@ where fn insert_migration_table_rows( ctx: &ReducerContext, table: &MigrationTable, -) -> Result { + import_mode: DatabaseMigrationImportMode, +) -> Result<(u64, u64), String> { macro_rules! insert_table_match_arm { ($($table:ident),+ $(,)?) 
=> { match table.name.as_str() { $( stringify!($table) => { let mut imported = 0u64; + let mut skipped = 0u64; for value in &table.rows { let row = row_from_json(value) .map_err(|error| format!("{}: {error}", stringify!($table)))?; - ctx.db + let insert_result = ctx.db .$table() - .try_insert(row) - .map_err(|error| format!("{} 导入失败: {error}", stringify!($table)))?; - imported = imported.saturating_add(1); + .try_insert(row); + match insert_result { + Ok(_) => imported = imported.saturating_add(1), + Err(error) => { + if import_mode == DatabaseMigrationImportMode::Incremental { + skipped = skipped.saturating_add(1); + } else { + return Err(format!("{} 导入失败: {error}", stringify!($table))); + } + } + } } - Ok(imported) + Ok((imported, skipped)) } )+ _ => Err(format!("迁移表不在白名单内: {}", table.name)), From 4ac4a0ca9c4b88533359a3134cab7f144a07b4bb Mon Sep 17 00:00:00 2001 From: kdletters Date: Mon, 27 Apr 2026 17:59:41 +0800 Subject: [PATCH 6/8] =?UTF-8?q?=E6=9B=B4=E6=96=B0AGENTS.md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- AGENTS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/AGENTS.md b/AGENTS.md index 6a1458e6..706b4611 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -31,7 +31,7 @@ - 涉及前端或 Node 侧的 SpacetimeDB TypeScript SDK、订阅、绑定使用时,按 `spacetimedb-typescript` 与 `spacetimedb-concepts` 执行。 - 若仓库内旧实现或旧文档与这些 skill 冲突,先修正文档和方案,再继续编码。 - 修改后端代码后,必须使用 `npm run api-server:maincloud` 自动重新运行后端,并执行相应自动测试;不要再使用旧的后端重启命令。 - +- 数据库表结构更改后,需要对齐migration.rs ## 文档图谱 From dc619817a14ecdaff1a8296ec247a7776a5f0971 Mon Sep 17 00:00:00 2001 From: kdletters Date: Mon, 27 Apr 2026 18:17:26 +0800 Subject: [PATCH 7/8] fix: reset big fish initial creation progress --- ...SH_INITIAL_CREATION_PROGRESS_ZERO_FIX_2026-04-26.md | 4 ++++ .../crates/spacetime-module/src/big_fish/session.rs | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git 
a/docs/technical/PUZZLE_BIG_FISH_INITIAL_CREATION_PROGRESS_ZERO_FIX_2026-04-26.md b/docs/technical/PUZZLE_BIG_FISH_INITIAL_CREATION_PROGRESS_ZERO_FIX_2026-04-26.md index 9d595594..bae0c7d3 100644 --- a/docs/technical/PUZZLE_BIG_FISH_INITIAL_CREATION_PROGRESS_ZERO_FIX_2026-04-26.md +++ b/docs/technical/PUZZLE_BIG_FISH_INITIAL_CREATION_PROGRESS_ZERO_FIX_2026-04-26.md @@ -26,3 +26,7 @@ 2. 新建拼图创作工作区后,顶部创作进度显示 `0%`。 3. 发送第一条用户消息后,进度按模型回包或后续操作正常推进。 4. 生成草稿、生成资产、发布等后续阶段的进度值不受本次调整影响。 + +## 2026-04-27 复核记录 + +大鱼吃小鱼创建链路曾回退为 `progress_percent = 20`。本次重新将 `create_big_fish_session_tx` 的初始进度锁定为 `0`,并用 `INITIAL_BIG_FISH_CREATION_PROGRESS_PERCENT` 常量表达该约束,避免欢迎语或初始锚点占位再次被误计为创作进度。 diff --git a/server-rs/crates/spacetime-module/src/big_fish/session.rs b/server-rs/crates/spacetime-module/src/big_fish/session.rs index 81f2c5d8..28d5bdaa 100644 --- a/server-rs/crates/spacetime-module/src/big_fish/session.rs +++ b/server-rs/crates/spacetime-module/src/big_fish/session.rs @@ -1,6 +1,8 @@ use crate::big_fish::tables::{big_fish_agent_message, big_fish_creation_session}; use crate::*; +const INITIAL_BIG_FISH_CREATION_PROGRESS_PERCENT: u32 = 0; + #[spacetimedb::procedure] pub fn create_big_fish_session( ctx: &mut ProcedureContext, @@ -182,7 +184,8 @@ pub(crate) fn create_big_fish_session_tx( owner_user_id: input.owner_user_id.clone(), seed_text: input.seed_text.trim().to_string(), current_turn: 0, - progress_percent: 20, + // 中文注释:欢迎语和初始锚点只建立工作台上下文,不能提前抬高创作进度。 + progress_percent: INITIAL_BIG_FISH_CREATION_PROGRESS_PERCENT, stage: BigFishCreationStage::CollectingAnchors, anchor_pack_json: serialize_anchor_pack(&anchor_pack) .map_err(|error| error.to_string())?, @@ -707,6 +710,11 @@ mod tests { } } + #[test] + fn initial_big_fish_creation_progress_starts_from_zero() { + assert_eq!(INITIAL_BIG_FISH_CREATION_PROGRESS_PERCENT, 0); + } + #[test] fn big_fish_direct_work_content_ignores_empty_created_session() { let empty_session = From 2792df03a6a0179839a2e9280eaf062c29043a34 Mon Sep 17 00:00:00 
2001 From: kdletters Date: Mon, 27 Apr 2026 21:12:43 +0800 Subject: [PATCH 8/8] Move big fish runtime to frontend --- ...FISH_DIRECTION_TOUCH_CONTROL_2026-04-24.md | 24 +- packages/shared/src/contracts/bigFish.ts | 4 - server-rs/crates/api-server/src/admin.rs | 1 - server-rs/crates/api-server/src/app.rs | 27 +- server-rs/crates/api-server/src/big_fish.rs | 172 +---- server-rs/crates/module-big-fish/src/lib.rs | 657 +----------------- .../crates/shared-contracts/src/big_fish.rs | 48 -- .../crates/spacetime-client/src/big_fish.rs | 72 -- server-rs/crates/spacetime-client/src/lib.rs | 6 +- .../crates/spacetime-client/src/mapper.rs | 124 ---- ...e_database_migration_operator_procedure.rs | 62 ++ .../big_fish_run_input_submit_input_type.rs | 19 - .../big_fish_runtime_run_type.rs | 82 --- .../big_fish_runtime_snapshot_type.rs | 31 - ...igration_authorize_operator_input_type.rs} | 13 +- ...> database_migration_export_input_type.rs} | 7 +- .../database_migration_import_input_type.rs | 18 + ...gration_operator_procedure_result_type.rs} | 8 +- .../database_migration_operator_type.rs | 59 ++ ...tabase_migration_procedure_result_type.rs} | 16 +- ...e_migration_revoke_operator_input_type.rs} | 7 +- ... 
=> database_migration_table_stat_type.rs} | 12 +- ...rt_database_migration_to_file_procedure.rs | 59 ++ .../get_big_fish_run_procedure.rs | 59 -- ..._database_migration_from_file_procedure.rs | 59 ++ ...gration_incremental_from_file_procedure.rs | 59 ++ .../src/module_bindings/mod.rs | 50 +- ...e_database_migration_operator_procedure.rs | 59 ++ .../start_big_fish_run_procedure.rs | 59 -- .../submit_big_fish_input_procedure.rs | 59 -- .../spacetime-module/src/big_fish/mod.rs | 2 - .../spacetime-module/src/big_fish/runtime.rs | 198 ------ .../spacetime-module/src/big_fish/session.rs | 14 +- .../spacetime-module/src/big_fish/tables.rs | 19 - .../crates/spacetime-module/src/migration.rs | 3 +- .../BigFishRuntimeShell.test.tsx | 65 +- .../big-fish-runtime/BigFishRuntimeShell.tsx | 85 ++- .../PlatformEntryFlowShellImpl.tsx | 104 +-- ...gEntryFlowShell.agent.interaction.test.tsx | 60 +- .../big-fish-runtime/bigFishLocalRuntime.ts | 395 +++++++++++ .../big-fish-runtime/bigFishRuntimeClient.ts | 68 -- src/services/big-fish-runtime/index.ts | 8 +- 42 files changed, 1058 insertions(+), 1895 deletions(-) create mode 100644 server-rs/crates/spacetime-client/src/module_bindings/authorize_database_migration_operator_procedure.rs delete mode 100644 server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_input_submit_input_type.rs delete mode 100644 server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_run_type.rs delete mode 100644 server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_snapshot_type.rs rename server-rs/crates/spacetime-client/src/module_bindings/{big_fish_run_status_type.rs => database_migration_authorize_operator_input_type.rs} (64%) rename server-rs/crates/spacetime-client/src/module_bindings/{big_fish_run_get_input_type.rs => database_migration_export_input_type.rs} (74%) create mode 100644 server-rs/crates/spacetime-client/src/module_bindings/database_migration_import_input_type.rs rename 
server-rs/crates/spacetime-client/src/module_bindings/{big_fish_run_procedure_result_type.rs => database_migration_operator_procedure_result_type.rs} (68%) create mode 100644 server-rs/crates/spacetime-client/src/module_bindings/database_migration_operator_type.rs rename server-rs/crates/spacetime-client/src/module_bindings/{big_fish_runtime_entity_type.rs => database_migration_procedure_result_type.rs} (52%) rename server-rs/crates/spacetime-client/src/module_bindings/{big_fish_vector_2_type.rs => database_migration_revoke_operator_input_type.rs} (72%) rename server-rs/crates/spacetime-client/src/module_bindings/{big_fish_run_start_input_type.rs => database_migration_table_stat_type.rs} (64%) create mode 100644 server-rs/crates/spacetime-client/src/module_bindings/export_database_migration_to_file_procedure.rs delete mode 100644 server-rs/crates/spacetime-client/src/module_bindings/get_big_fish_run_procedure.rs create mode 100644 server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_from_file_procedure.rs create mode 100644 server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_incremental_from_file_procedure.rs create mode 100644 server-rs/crates/spacetime-client/src/module_bindings/revoke_database_migration_operator_procedure.rs delete mode 100644 server-rs/crates/spacetime-client/src/module_bindings/start_big_fish_run_procedure.rs delete mode 100644 server-rs/crates/spacetime-client/src/module_bindings/submit_big_fish_input_procedure.rs delete mode 100644 server-rs/crates/spacetime-module/src/big_fish/runtime.rs create mode 100644 src/services/big-fish-runtime/bigFishLocalRuntime.ts delete mode 100644 src/services/big-fish-runtime/bigFishRuntimeClient.ts diff --git a/docs/technical/BIG_FISH_DIRECTION_TOUCH_CONTROL_2026-04-24.md b/docs/technical/BIG_FISH_DIRECTION_TOUCH_CONTROL_2026-04-24.md index 3b4d3699..e92f43a9 100644 --- a/docs/technical/BIG_FISH_DIRECTION_TOUCH_CONTROL_2026-04-24.md +++ 
b/docs/technical/BIG_FISH_DIRECTION_TOUCH_CONTROL_2026-04-24.md @@ -2,26 +2,34 @@ ## 背景 -当前大鱼运行时使用左下固定虚拟摇杆,玩家必须点到摇杆区域才能移动。移动端实际体验应改为屏幕任意位置触控:第一次触点只建立方向原点,不直接产生移动;后续触点相对原点形成方向向量,角色按恒定速度朝该方向行动。 +当前大鱼运行时使用左下固定虚拟摇杆,玩家必须点到摇杆区域才能移动。移动端实际体验应改为屏幕任意位置触控:按住屏幕时不再用“触点相对按下原点”的距离判断方向,而是按固定采样间隔比较“上一次触点位置”和“当前触点位置”的位移方向,角色按恒定速度朝采样方向行动。 ## 交互规则 -1. 玩家在玩法舞台内按下时,记录第一个触点坐标为本次操作原点。 +1. 玩家在玩法舞台内按下时,记录当前触点坐标为采样起点。 2. 按下瞬间提交 `{ x: 0, y: 0 }`,保证一开始玩家不动。 -3. 手指/鼠标移动后,用“当前触点 - 原点”的向量计算方向。 -4. 输入只表达方向,不表达速度;超过死区后归一化为单位方向向量。 -5. 松开或取消触控后,清空操作原点并提交 `{ x: 0, y: 0 }`。 -6. 前端继续定时提交当前方向,即使没有玩家输入也提交零向量,让后端或本地直达局持续推进世界 tick。 +3. 按住期间每 `0.1` 秒采样一次当前触点坐标,并用“当前触点 - 上一次采样触点”的位移计算方向。 +4. 输入只表达方向,不表达速度;超过采样死区后归一化为单位方向向量,未超过死区则沿用上一段有效方向。 +5. 每次采样完成后,把当前触点写为下一次采样的“上一次位置”。 +6. 松开或取消触控后,清空采样状态并提交 `{ x: 0, y: 0 }`。 +7. 前端继续定时提交当前方向,即使没有玩家输入也提交零向量,让后端或本地直达局持续推进世界 tick。 ## 本地直达局边界 -- `/big-fish` 的本地占位局必须在玩家未操作时继续移动野生对象。 +- 大鱼吃小鱼的最终游玩部分统一放在前端本地 runtime,不再调用后端 start/input/get run 接口。 +- 后端只保留 Agent 创作、草稿编译、资产生成、发布和作品列表;不负责移动、碰撞、刷鱼、合成、屏外清理或胜负模拟。 +- `/big-fish` 的本地直达局和平台内测试玩法必须共用前端本地 runtime,并在玩家未操作时继续移动野生对象。 - 玩家速度保持恒定,只由方向决定移动方向。 - 野生对象使用确定性游动规则,避免直达入口看起来像静态截图。 ## 验收口径 1. 在舞台任意位置按下时玩家不立即移动。 -2. 按住并拖动后,玩家朝拖动方向恒速移动。 +2. 按住并拖动后,玩家方向来自最近 `0.1` 秒位移,而不是来自按下原点。 3. 松开后玩家停止。 4. 
不操作时野生对象仍会持续游动。 + +## 资产生成补充口径 + +- 大鱼实体主图、`idle_float` 和 `move_swim` 动作图都按 RPG 角色资产口径处理:单体完整入镜、中心构图、轮廓清晰、透明背景、无 UI/文字/水印。 +- 场地背景只生成环境,不生成规则说明、UI 或巨大主体遮挡;画面元素要少,中央活动区域要大,边缘只保留少量出生区提示。 diff --git a/packages/shared/src/contracts/bigFish.ts b/packages/shared/src/contracts/bigFish.ts index a3248f14..4bedd2d1 100644 --- a/packages/shared/src/contracts/bigFish.ts +++ b/packages/shared/src/contracts/bigFish.ts @@ -185,7 +185,3 @@ export type BigFishRuntimeSnapshotResponse = { eventLog: string[]; updatedAt: string; }; - -export type BigFishRunResponse = { - run: BigFishRuntimeSnapshotResponse; -}; diff --git a/server-rs/crates/api-server/src/admin.rs b/server-rs/crates/api-server/src/admin.rs index 69489b8c..327687e0 100644 --- a/server-rs/crates/api-server/src/admin.rs +++ b/server-rs/crates/api-server/src/admin.rs @@ -65,7 +65,6 @@ const DATABASE_OVERVIEW_TABLES: &[&str] = &[ "big_fish_creation_session", "big_fish_agent_message", "big_fish_asset_slot", - "big_fish_runtime_run", "puzzle_work_profile", "puzzle_agent_session", "puzzle_agent_message", diff --git a/server-rs/crates/api-server/src/app.rs b/server-rs/crates/api-server/src/app.rs index 9446bcd4..3c00073b 100644 --- a/server-rs/crates/api-server/src/app.rs +++ b/server-rs/crates/api-server/src/app.rs @@ -33,9 +33,9 @@ use crate::{ auth_public_user::{get_public_user_by_code, get_public_user_by_id}, auth_sessions::auth_sessions, big_fish::{ - create_big_fish_session, delete_big_fish_work, execute_big_fish_action, get_big_fish_run, - get_big_fish_session, get_big_fish_works, list_big_fish_gallery, start_big_fish_run, - stream_big_fish_message, submit_big_fish_input, submit_big_fish_message, + create_big_fish_session, delete_big_fish_work, execute_big_fish_action, + get_big_fish_session, get_big_fish_works, list_big_fish_gallery, stream_big_fish_message, + submit_big_fish_message, }, character_animation_assets::{ generate_character_animation, get_character_animation_job, get_character_workflow_cache, @@ -574,27 
+574,6 @@ pub fn build_router(state: AppState) -> Router { require_bearer_auth, )), ) - .route( - "/api/runtime/big-fish/sessions/{session_id}/runs", - post(start_big_fish_run).route_layer(middleware::from_fn_with_state( - state.clone(), - require_bearer_auth, - )), - ) - .route( - "/api/runtime/big-fish/runs/{run_id}", - get(get_big_fish_run).route_layer(middleware::from_fn_with_state( - state.clone(), - require_bearer_auth, - )), - ) - .route( - "/api/runtime/big-fish/runs/{run_id}/input", - post(submit_big_fish_input).route_layer(middleware::from_fn_with_state( - state.clone(), - require_bearer_auth, - )), - ) .route( "/api/runtime/puzzle/agent/sessions", post(create_puzzle_agent_session).route_layer(middleware::from_fn_with_state( diff --git a/server-rs/crates/api-server/src/big_fish.rs b/server-rs/crates/api-server/src/big_fish.rs index 3e4c53e6..8ab50e46 100644 --- a/server-rs/crates/api-server/src/big_fish.rs +++ b/server-rs/crates/api-server/src/big_fish.rs @@ -23,10 +23,8 @@ use shared_contracts::big_fish::{ BigFishActionResponse, BigFishAgentMessageResponse, BigFishAnchorItemResponse, BigFishAnchorPackResponse, BigFishAssetCoverageResponse, BigFishAssetSlotResponse, BigFishBackgroundBlueprintResponse, BigFishGameDraftResponse, BigFishLevelBlueprintResponse, - BigFishRunResponse, BigFishRuntimeEntityResponse, BigFishRuntimeParamsResponse, - BigFishRuntimeSnapshotResponse, BigFishSessionResponse, BigFishSessionSnapshotResponse, - BigFishVector2Response, CreateBigFishSessionRequest, ExecuteBigFishActionRequest, - SendBigFishMessageRequest, SubmitBigFishInputRequest, + BigFishRuntimeParamsResponse, BigFishSessionResponse, BigFishSessionSnapshotResponse, + CreateBigFishSessionRequest, ExecuteBigFishActionRequest, SendBigFishMessageRequest, }; use shared_contracts::big_fish_works::{BigFishWorkSummaryResponse, BigFishWorksResponse}; use shared_kernel::{build_prefixed_uuid_id, format_timestamp_micros}; @@ -34,10 +32,8 @@ use spacetime_client::{ 
BigFishAgentMessageRecord, BigFishAnchorItemRecord, BigFishAnchorPackRecord, BigFishAssetCoverageRecord, BigFishAssetGenerateRecordInput, BigFishAssetSlotRecord, BigFishBackgroundBlueprintRecord, BigFishGameDraftRecord, BigFishLevelBlueprintRecord, - BigFishMessageSubmitRecordInput, BigFishRunInputSubmitRecordInput, BigFishRunStartRecordInput, - BigFishRuntimeEntityRecord, BigFishRuntimeParamsRecord, BigFishRuntimeRecord, - BigFishSessionCreateRecordInput, BigFishSessionRecord, BigFishVector2Record, - BigFishWorkSummaryRecord, SpacetimeClientError, + BigFishMessageSubmitRecordInput, BigFishRuntimeParamsRecord, BigFishSessionCreateRecordInput, + BigFishSessionRecord, BigFishWorkSummaryRecord, SpacetimeClientError, }; use tokio::time::sleep; @@ -577,99 +573,6 @@ pub async fn execute_big_fish_action( )) } -pub async fn start_big_fish_run( - State(state): State, - Path(session_id): Path, - Extension(request_context): Extension, - Extension(authenticated): Extension, -) -> Result, Response> { - ensure_non_empty(&request_context, &session_id, "sessionId")?; - - let run = state - .spacetime_client() - .start_big_fish_run(BigFishRunStartRecordInput { - run_id: build_prefixed_uuid_id("big-fish-run-"), - session_id, - owner_user_id: authenticated.claims().user_id().to_string(), - started_at_micros: current_utc_micros(), - }) - .await - .map_err(|error| { - big_fish_error_response(&request_context, map_big_fish_client_error(error)) - })?; - - Ok(json_success_body( - Some(&request_context), - BigFishRunResponse { - run: map_big_fish_runtime_response(run), - }, - )) -} - -pub async fn get_big_fish_run( - State(state): State, - Path(run_id): Path, - Extension(request_context): Extension, - Extension(authenticated): Extension, -) -> Result, Response> { - ensure_non_empty(&request_context, &run_id, "runId")?; - - let run = state - .spacetime_client() - .get_big_fish_run(run_id, authenticated.claims().user_id().to_string()) - .await - .map_err(|error| { - 
big_fish_error_response(&request_context, map_big_fish_client_error(error)) - })?; - - Ok(json_success_body( - Some(&request_context), - BigFishRunResponse { - run: map_big_fish_runtime_response(run), - }, - )) -} - -pub async fn submit_big_fish_input( - State(state): State, - Path(run_id): Path, - Extension(request_context): Extension, - Extension(authenticated): Extension, - payload: Result, JsonRejection>, -) -> Result, Response> { - let Json(payload) = payload.map_err(|error| { - big_fish_error_response( - &request_context, - AppError::from_status(StatusCode::BAD_REQUEST).with_details(json!({ - "provider": "big-fish", - "message": error.body_text(), - })), - ) - })?; - ensure_non_empty(&request_context, &run_id, "runId")?; - - let run = state - .spacetime_client() - .submit_big_fish_input(BigFishRunInputSubmitRecordInput { - run_id, - owner_user_id: authenticated.claims().user_id().to_string(), - input_x: payload.x, - input_y: payload.y, - submitted_at_micros: current_utc_micros(), - }) - .await - .map_err(|error| { - big_fish_error_response(&request_context, map_big_fish_client_error(error)) - })?; - - Ok(json_success_body( - Some(&request_context), - BigFishRunResponse { - run: map_big_fish_runtime_response(run), - }, - )) -} - fn map_big_fish_session_response(session: BigFishSessionRecord) -> BigFishSessionSnapshotResponse { BigFishSessionSnapshotResponse { session_id: session.session_id, @@ -910,32 +813,6 @@ fn map_big_fish_agent_message_response( } } -fn map_big_fish_runtime_response(run: BigFishRuntimeRecord) -> BigFishRuntimeSnapshotResponse { - BigFishRuntimeSnapshotResponse { - run_id: run.run_id, - session_id: run.session_id, - status: run.status, - tick: run.tick, - player_level: run.player_level, - win_level: run.win_level, - leader_entity_id: run.leader_entity_id, - owned_entities: run - .owned_entities - .into_iter() - .map(map_big_fish_entity_response) - .collect(), - wild_entities: run - .wild_entities - .into_iter() - 
.map(map_big_fish_entity_response) - .collect(), - camera_center: map_big_fish_vector_response(run.camera_center), - last_input: map_big_fish_vector_response(run.last_input), - event_log: run.event_log, - updated_at: run.updated_at, - } -} - fn map_big_fish_work_summary_response( item: BigFishWorkSummaryRecord, ) -> BigFishWorkSummaryResponse { @@ -957,25 +834,6 @@ fn map_big_fish_work_summary_response( } } -fn map_big_fish_entity_response( - entity: BigFishRuntimeEntityRecord, -) -> BigFishRuntimeEntityResponse { - BigFishRuntimeEntityResponse { - entity_id: entity.entity_id, - level: entity.level, - position: map_big_fish_vector_response(entity.position), - radius: entity.radius, - offscreen_seconds: entity.offscreen_seconds, - } -} - -fn map_big_fish_vector_response(vector: BigFishVector2Record) -> BigFishVector2Response { - BigFishVector2Response { - x: vector.x, - y: vector.y, - } -} - fn build_big_fish_welcome_text(seed_text: &str) -> String { if seed_text.trim().is_empty() { return "我会先帮你确定大鱼吃小鱼的核心锚点。可以从主题生态、成长阶梯或风险节奏开始。" @@ -1013,7 +871,8 @@ struct BigFishFormalAssetContext { const BIG_FISH_TEXT_TO_IMAGE_MODEL: &str = "wan2.2-t2i-flash"; const BIG_FISH_ENTITY_KIND: &str = "big_fish_session"; -const BIG_FISH_DEFAULT_NEGATIVE_PROMPT: &str = "文字,水印,logo,UI界面,对话框,边框,多余肢体,畸形鱼体,低清晰度,模糊,压缩噪点,现代摄影棚,写实照片背景"; +const BIG_FISH_DEFAULT_NEGATIVE_PROMPT: &str = "文字,水印,logo,UI界面,对话框,边框,多余肢体,畸形鱼体,低清晰度,模糊,压缩噪点,现代摄影棚,写实照片背景,复杂背景"; +const BIG_FISH_TRANSPARENT_ASSET_NEGATIVE_PROMPT: &str = "文字,水印,logo,UI界面,对话框,边框,多余肢体,畸形鱼体,低清晰度,模糊,压缩噪点,现代摄影棚,写实照片背景,场景背景,水草背景,气泡背景,多只主体,阴影地面"; async fn generate_big_fish_formal_asset( state: &AppState, @@ -1087,7 +946,7 @@ fn build_big_fish_formal_asset_context( Ok(BigFishFormalAssetContext { entity_id: session.session_id.clone(), prompt: build_big_fish_level_main_image_prompt(draft, level), - negative_prompt: BIG_FISH_DEFAULT_NEGATIVE_PROMPT.to_string(), + negative_prompt: BIG_FISH_TRANSPARENT_ASSET_NEGATIVE_PROMPT.to_string(), size: 
"1024*1024".to_string(), asset_object_kind: "big_fish_level_main_image".to_string(), binding_slot: format!("level_main_image:{level_part}"), @@ -1114,7 +973,7 @@ fn build_big_fish_formal_asset_context( Ok(BigFishFormalAssetContext { entity_id: session.session_id.clone(), prompt: build_big_fish_level_motion_prompt(draft, level, motion_key), - negative_prompt: BIG_FISH_DEFAULT_NEGATIVE_PROMPT.to_string(), + negative_prompt: BIG_FISH_TRANSPARENT_ASSET_NEGATIVE_PROMPT.to_string(), size: "1024*1024".to_string(), asset_object_kind: "big_fish_level_motion".to_string(), binding_slot: format!("level_motion:{level_part}:{motion_key}"), @@ -1190,8 +1049,8 @@ fn build_big_fish_level_main_image_prompt( ), format!("轮廓方向:{}。", level.silhouette_direction), format!("视觉提示词种子:{}。", level.visual_prompt_seed), - "画面要求:单体游戏生物完整入镜,轮廓清晰,适合作为大鱼吃小鱼等级角色主图,2D 高完成度游戏插画,深海发光质感,中央构图。".to_string(), - "不要出现 UI、文字、logo、水印、对话框或边框;背景保持干净的深海渐变或透明感,不要出现多只主体。".to_string(), + "画面要求:按 RPG 角色资产口径生成,单体鱼形游戏生物完整入镜,轮廓清晰,中心构图,2D 高完成度游戏插画,深海发光质感。".to_string(), + "背景要求:透明背景 PNG 风格,不出现任何场景、水草、气泡、阴影地面、UI、文字、logo、水印、对话框或边框;不要出现多只主体。".to_string(), ] .join("") } @@ -1217,8 +1076,8 @@ fn build_big_fish_level_motion_prompt( ), format!("动作提示词种子:{}。", level.motion_prompt_seed), format!("动作要求:{motion_text}"), - "画面要求:单体生物完整入镜,轮廓清晰,动作方向明确,2D 高完成度游戏插画,适合作为 Big Fish 动作槽位的静态 keyframe。".to_string(), - "不要出现 UI、文字、logo、水印、对话框或边框;不要生成序列帧拼图,不要出现多只主体。".to_string(), + "画面要求:按 RPG 角色动画资产口径生成,单体鱼形生物完整入镜,轮廓清晰,动作方向明确,2D 高完成度游戏插画,适合作为 Big Fish 动作槽位的静态 keyframe。".to_string(), + "背景要求:透明背景 PNG 风格,不出现任何场景、水草、气泡、阴影地面、UI、文字、logo、水印、对话框或边框;不要生成序列帧拼图,不要出现多只主体。".to_string(), ] .join("") } @@ -1238,8 +1097,8 @@ fn build_big_fish_stage_background_prompt(draft: &BigFishGameDraftRecord) -> Str format!("安全操作区:{}。", background.safe_play_area_hint), format!("出生边缘:{}。", background.spawn_edge_hint), format!("背景提示词种子:{}。", background.background_prompt_seed), - "画面要求:竖屏 9:16,中央 70% 保持清爽可读,边缘有深海生态层次和微弱生物光,适合作为大鱼吃小鱼运行态背景。".to_string(), - "不要出现 
UI、文字、logo、水印、对话框、边框或巨大主体遮挡;不要把中央操作区画得过暗或过复杂。".to_string(), + "画面要求:竖屏 9:16,大场地,全屏运行态背景,中央 80% 保持开阔清爽,边缘只保留少量出生区环境提示。".to_string(), + "元素要求:整体元素少,不出现大型主体、密集装饰、鱼群主角、UI、文字、logo、水印、对话框或边框;不要把中央操作区画得过暗或过复杂。".to_string(), ] .join("") } @@ -1769,8 +1628,7 @@ fn big_fish_sse_error_event_message(message: String) -> Event { fn map_big_fish_client_error(error: SpacetimeClientError) -> AppError { let status = match &error { SpacetimeClientError::Procedure(message) - if message.contains("big_fish_creation_session 不存在") - || message.contains("big_fish_runtime_run 不存在") => + if message.contains("big_fish_creation_session 不存在") => { StatusCode::NOT_FOUND } diff --git a/server-rs/crates/module-big-fish/src/lib.rs b/server-rs/crates/module-big-fish/src/lib.rs index 1dbb594f..adeec338 100644 --- a/server-rs/crates/module-big-fish/src/lib.rs +++ b/server-rs/crates/module-big-fish/src/lib.rs @@ -9,17 +9,12 @@ pub const BIG_FISH_SESSION_ID_PREFIX: &str = "big-fish-session-"; pub const BIG_FISH_MESSAGE_ID_PREFIX: &str = "big-fish-message-"; pub const BIG_FISH_OPERATION_ID_PREFIX: &str = "big-fish-operation-"; pub const BIG_FISH_ASSET_SLOT_ID_PREFIX: &str = "big-fish-asset-"; -pub const BIG_FISH_RUN_ID_PREFIX: &str = "big-fish-run-"; pub const BIG_FISH_DEFAULT_LEVEL_COUNT: u32 = 8; pub const BIG_FISH_MIN_LEVEL_COUNT: u32 = 6; pub const BIG_FISH_MAX_LEVEL_COUNT: u32 = 12; pub const BIG_FISH_MERGE_COUNT_PER_UPGRADE: u32 = 3; pub const BIG_FISH_OFFSCREEN_CULL_SECONDS: f32 = 3.0; pub const BIG_FISH_TARGET_WILD_COUNT: usize = 12; -pub const BIG_FISH_VIEW_WIDTH: f32 = 720.0; -pub const BIG_FISH_VIEW_HEIGHT: f32 = 1280.0; -pub const BIG_FISH_WORLD_HALF_WIDTH: f32 = 900.0; -pub const BIG_FISH_WORLD_HALF_HEIGHT: f32 = 1600.0; #[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -72,14 +67,6 @@ pub enum BigFishAssetStatus { Ready, } -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, 
Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum BigFishRunStatus { - Running, - Won, - Failed, -} - #[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct BigFishAnchorItem { @@ -209,41 +196,6 @@ pub struct BigFishSessionSnapshot { pub updated_at_micros: i64, } -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BigFishVector2 { - pub x: f32, - pub y: f32, -} - -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BigFishRuntimeEntity { - pub entity_id: String, - pub level: u32, - pub position: BigFishVector2, - pub radius: f32, - pub offscreen_seconds: f32, -} - -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BigFishRuntimeSnapshot { - pub run_id: String, - pub session_id: String, - pub status: BigFishRunStatus, - pub tick: u64, - pub player_level: u32, - pub win_level: u32, - pub leader_entity_id: Option, - pub owned_entities: Vec, - pub wild_entities: Vec, - pub camera_center: BigFishVector2, - pub last_input: BigFishVector2, - pub event_log: Vec, - pub updated_at_micros: i64, -} - #[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BigFishSessionProcedureResult { @@ -293,14 +245,6 @@ pub struct BigFishWorksProcedureResult { pub error_message: Option, } -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BigFishRunProcedureResult { - pub ok: bool, - pub run: Option, - pub error_message: Option, -} - #[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct 
BigFishSessionCreateInput { @@ -372,43 +316,15 @@ pub struct BigFishPublishInput { pub published_at_micros: i64, } -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BigFishRunStartInput { - pub run_id: String, - pub session_id: String, - pub owner_user_id: String, - pub started_at_micros: i64, -} - -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BigFishRunInputSubmitInput { - pub run_id: String, - pub owner_user_id: String, - pub input_x: f32, - pub input_y: f32, - pub submitted_at_micros: i64, -} - -#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))] -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BigFishRunGetInput { - pub run_id: String, - pub owner_user_id: String, -} - #[derive(Clone, Debug, PartialEq, Eq)] pub enum BigFishFieldError { MissingSessionId, MissingOwnerUserId, MissingMessageId, MissingMessageText, - MissingRunId, MissingDraft, InvalidLevel, InvalidAssetKind, - InvalidRunState, } impl BigFishCreationStage { @@ -474,16 +390,6 @@ impl BigFishAssetStatus { } } -impl BigFishRunStatus { - pub fn as_str(self) -> &'static str { - match self { - Self::Running => "running", - Self::Won => "won", - Self::Failed => "failed", - } - } -} - pub fn empty_anchor_pack() -> BigFishAnchorPack { BigFishAnchorPack { gameplay_promise: BigFishAnchorItem { @@ -565,12 +471,14 @@ pub fn compile_default_draft(anchor_pack: &BigFishAnchorPack) -> BigFishGameDraf background: BigFishBackgroundBlueprint { theme: theme.clone(), color_mood: "深蓝、青绿、带少量暖色生物光".to_string(), - foreground_hints: "轻微漂浮颗粒和边缘水草,不遮挡中央操作区".to_string(), - midground_composition: "中央留出清晰活动区域,边缘有出生缓冲层".to_string(), - background_depth: "纵深水域与远处体型剪影".to_string(), - safe_play_area_hint: "9:16 竖屏中央 70% 为主要活动区".to_string(), - spawn_edge_hint: "四周边缘作为野生实体出生区".to_string(), - background_prompt_seed: 
format!("{theme},竖屏 9:16,全屏游戏背景,无文字,无 UI 框"), + foreground_hints: "只保留少量漂浮颗粒和边缘水草,不遮挡中央操作区".to_string(), + midground_composition: "中央留出大面积清晰活动区域,边缘只做出生缓冲层".to_string(), + background_depth: "简洁纵深水域与极少量远处剪影".to_string(), + safe_play_area_hint: "9:16 竖屏中央 80% 为主要活动区".to_string(), + spawn_edge_hint: "四周边缘以少量暗礁或水草提示野生实体出生区".to_string(), + background_prompt_seed: format!( + "{theme},竖屏 9:16,全屏大场地游戏背景,元素少,中央开阔,无文字,无 UI 框" + ), }, runtime_params: BigFishRuntimeParams { level_count, @@ -673,77 +581,6 @@ pub fn build_generated_asset_slot( }) } -pub fn build_initial_runtime_snapshot( - run_id: String, - session_id: String, - draft: &BigFishGameDraft, - now_micros: i64, -) -> BigFishRuntimeSnapshot { - let mut snapshot = BigFishRuntimeSnapshot { - run_id, - session_id, - status: BigFishRunStatus::Running, - tick: 0, - player_level: 1, - win_level: draft.runtime_params.win_level, - leader_entity_id: Some("owned-1".to_string()), - owned_entities: vec![BigFishRuntimeEntity { - entity_id: "owned-1".to_string(), - level: 1, - position: BigFishVector2 { x: 0.0, y: 0.0 }, - radius: entity_radius(1), - offscreen_seconds: 0.0, - }], - wild_entities: vec![ - BigFishRuntimeEntity { - entity_id: "wild-open-1".to_string(), - level: 1, - position: BigFishVector2 { x: 72.0, y: 0.0 }, - radius: entity_radius(1), - offscreen_seconds: 0.0, - }, - BigFishRuntimeEntity { - entity_id: "wild-open-2".to_string(), - level: 1, - position: BigFishVector2 { x: -88.0, y: 30.0 }, - radius: entity_radius(1), - offscreen_seconds: 0.0, - }, - ], - camera_center: BigFishVector2 { x: 0.0, y: 0.0 }, - last_input: BigFishVector2 { x: 0.0, y: 0.0 }, - event_log: vec!["开局生成 2 个同级可收编目标".to_string()], - updated_at_micros: now_micros, - }; - maintain_wild_pool(&mut snapshot, &draft.runtime_params); - snapshot -} - -pub fn advance_runtime_snapshot( - mut snapshot: BigFishRuntimeSnapshot, - params: &BigFishRuntimeParams, - input_x: f32, - input_y: f32, - now_micros: i64, -) -> BigFishRuntimeSnapshot { - if 
snapshot.status != BigFishRunStatus::Running { - return snapshot; - } - - let step_seconds = resolve_step_seconds(&snapshot, now_micros); - snapshot.tick = snapshot.tick.saturating_add(1); - snapshot.last_input = normalize_input(input_x, input_y); - move_owned_entities(&mut snapshot, params, step_seconds); - resolve_collisions(&mut snapshot, params); - apply_chain_merges(&mut snapshot, params); - refresh_player_leader(&mut snapshot); - apply_win_or_fail(&mut snapshot, params); - update_wild_culling(&mut snapshot, params, step_seconds); - maintain_wild_pool(&mut snapshot, params); - snapshot.updated_at_micros = now_micros; - snapshot -} - pub fn validate_session_get_input(input: &BigFishSessionGetInput) -> Result<(), BigFishFieldError> { validate_session_owner(&input.session_id, &input.owner_user_id) } @@ -817,36 +654,6 @@ pub fn validate_publish_input(input: &BigFishPublishInput) -> Result<(), BigFish validate_session_owner(&input.session_id, &input.owner_user_id) } -pub fn validate_run_start_input(input: &BigFishRunStartInput) -> Result<(), BigFishFieldError> { - validate_session_owner(&input.session_id, &input.owner_user_id)?; - if normalize_required_string(&input.run_id).is_none() { - return Err(BigFishFieldError::MissingRunId); - } - Ok(()) -} - -pub fn validate_run_get_input(input: &BigFishRunGetInput) -> Result<(), BigFishFieldError> { - if normalize_required_string(&input.run_id).is_none() { - return Err(BigFishFieldError::MissingRunId); - } - if normalize_required_string(&input.owner_user_id).is_none() { - return Err(BigFishFieldError::MissingOwnerUserId); - } - Ok(()) -} - -pub fn validate_run_input_submit_input( - input: &BigFishRunInputSubmitInput, -) -> Result<(), BigFishFieldError> { - if normalize_required_string(&input.run_id).is_none() { - return Err(BigFishFieldError::MissingRunId); - } - if normalize_required_string(&input.owner_user_id).is_none() { - return Err(BigFishFieldError::MissingOwnerUserId); - } - Ok(()) -} - pub fn 
serialize_anchor_pack(anchor_pack: &BigFishAnchorPack) -> Result { serde_json::to_string(anchor_pack) } @@ -873,18 +680,6 @@ pub fn deserialize_asset_coverage(value: &str) -> Result Result { - serde_json::to_string(snapshot) -} - -pub fn deserialize_runtime_snapshot( - value: &str, -) -> Result { - serde_json::from_str(value) -} - fn fallback_anchor_value(anchor: &BigFishAnchorItem, fallback: &str) -> String { normalize_required_string(&anchor.value).unwrap_or_else(|| fallback.to_string()) } @@ -911,8 +706,12 @@ fn build_level_blueprint(level: u32, level_count: u32, theme: &str) -> BigFishLe 1.0 + level as f32 * 0.22 ), size_ratio: 1.0 + (level.saturating_sub(1) as f32 * 0.22), - visual_prompt_seed: format!("{theme} 第 {level} 级实体主图,透明背景,清晰轮廓"), - motion_prompt_seed: format!("{theme} 第 {level} 级实体 idle_float 与 move_swim 动作"), + visual_prompt_seed: format!( + "{theme} 第 {level} 级鱼形实体主图,RPG 角色资产口径,透明背景,单体完整入镜,清晰轮廓" + ), + motion_prompt_seed: format!( + "{theme} 第 {level} 级鱼形实体 idle_float 与 move_swim 动作,RPG 角色动画资产口径,透明背景" + ), merge_source_level: if level == 1 { None } else { Some(level - 1) }, prey_window, threat_window, @@ -945,7 +744,7 @@ fn build_asset_prompt_snapshot( .ok_or(BigFishFieldError::InvalidLevel)?; let motion_key = motion_key.ok_or(BigFishFieldError::InvalidAssetKind)?; Ok(format!( - "{},动作位:{}", + "{},动作位:{},透明背景,单体完整入镜", blueprint.motion_prompt_seed, motion_key )) } @@ -1004,311 +803,6 @@ fn validate_level(level: Option, draft: &BigFishGameDraft) -> Result<(), Bi } } -fn normalize_input(x: f32, y: f32) -> BigFishVector2 { - let length = (x * x + y * y).sqrt(); - if length <= 1.0 { - return BigFishVector2 { x, y }; - } - BigFishVector2 { - x: x / length, - y: y / length, - } -} - -/// 运行态仍由 `POST input` 触发推进,因此“屏外 3 秒”这类规则必须按真实秒数累计, -/// 否则会随着输入频率变化而漂移。 -fn resolve_step_seconds(snapshot: &BigFishRuntimeSnapshot, now_micros: i64) -> f32 { - ((now_micros - snapshot.updated_at_micros).max(0) as f32) / 1_000_000.0 -} - -fn move_owned_entities( - snapshot: 
&mut BigFishRuntimeSnapshot, - params: &BigFishRuntimeParams, - step_seconds: f32, -) { - let input = snapshot.last_input.clone(); - if let Some(leader) = snapshot.owned_entities.first_mut() { - leader.position.x = clamp_world( - leader.position.x + input.x * params.leader_move_speed * step_seconds, - true, - ); - leader.position.y = clamp_world( - leader.position.y + input.y * params.leader_move_speed * step_seconds, - false, - ); - snapshot.camera_center = leader.position.clone(); - } - - let leader_position = snapshot.camera_center.clone(); - for (index, follower) in snapshot.owned_entities.iter_mut().enumerate().skip(1) { - let slot_offset = ((index as f32) * 0.7).sin() * 36.0; - let target = BigFishVector2 { - x: leader_position.x - 42.0 - index as f32 * 8.0, - y: leader_position.y + slot_offset, - }; - let delta_x = target.x - follower.position.x; - let delta_y = target.y - follower.position.y; - let distance = (delta_x * delta_x + delta_y * delta_y).sqrt(); - if distance <= f32::EPSILON { - continue; - } - let catch_up_ratio = - (params.follower_catch_up_speed * step_seconds / distance).clamp(0.0, 1.0); - follower.position.x += delta_x * catch_up_ratio; - follower.position.y += delta_y * catch_up_ratio; - } -} - -fn resolve_collisions(snapshot: &mut BigFishRuntimeSnapshot, _params: &BigFishRuntimeParams) { - let mut owned_to_remove = Vec::new(); - let mut wild_to_remove = Vec::new(); - let mut newly_owned = Vec::new(); - - for (owned_index, owned) in snapshot.owned_entities.iter().enumerate() { - for (wild_index, wild) in snapshot.wild_entities.iter().enumerate() { - if wild_to_remove.contains(&wild_index) || owned_to_remove.contains(&owned_index) { - continue; - } - if distance(&owned.position, &wild.position) > owned.radius + wild.radius { - continue; - } - - if owned.level >= wild.level { - wild_to_remove.push(wild_index); - newly_owned.push(BigFishRuntimeEntity { - entity_id: format!("owned-from-{}-{}", wild.entity_id, snapshot.tick), - level: 
wild.level, - position: wild.position.clone(), - radius: entity_radius(wild.level), - offscreen_seconds: 0.0, - }); - snapshot - .event_log - .push(format!("收编 {} 级实体", wild.level)); - } else { - owned_to_remove.push(owned_index); - snapshot.event_log.push(format!( - "{} 级己方实体被 {} 级野生实体吃掉", - owned.level, wild.level - )); - } - } - } - - remove_indices(&mut snapshot.wild_entities, &wild_to_remove); - remove_indices(&mut snapshot.owned_entities, &owned_to_remove); - snapshot.owned_entities.extend(newly_owned); -} - -fn apply_chain_merges(snapshot: &mut BigFishRuntimeSnapshot, params: &BigFishRuntimeParams) { - loop { - let mut merged = false; - for level in 1..params.win_level { - let indices = snapshot - .owned_entities - .iter() - .enumerate() - .filter_map(|(index, entity)| (entity.level == level).then_some(index)) - .take(params.merge_count_per_upgrade as usize) - .collect::>(); - if indices.len() < params.merge_count_per_upgrade as usize { - continue; - } - - let center = average_position(&indices, &snapshot.owned_entities); - remove_indices(&mut snapshot.owned_entities, &indices); - snapshot.owned_entities.push(BigFishRuntimeEntity { - entity_id: format!("owned-merge-{}-{}", level + 1, snapshot.tick), - level: level + 1, - position: center, - radius: entity_radius(level + 1), - offscreen_seconds: 0.0, - }); - snapshot - .event_log - .push(format!("3 个 {} 级实体合成 {} 级", level, level + 1)); - merged = true; - break; - } - - if !merged { - break; - } - } -} - -fn refresh_player_leader(snapshot: &mut BigFishRuntimeSnapshot) { - snapshot.owned_entities.sort_by(|left, right| { - right - .level - .cmp(&left.level) - .then_with(|| { - distance(&left.position, &snapshot.camera_center) - .partial_cmp(&distance(&right.position, &snapshot.camera_center)) - .unwrap_or(std::cmp::Ordering::Equal) - }) - .then_with(|| left.entity_id.cmp(&right.entity_id)) - }); - snapshot.leader_entity_id = snapshot - .owned_entities - .first() - .map(|entity| entity.entity_id.clone()); - 
snapshot.player_level = snapshot - .owned_entities - .iter() - .map(|entity| entity.level) - .max() - .unwrap_or(0); - if let Some(leader) = snapshot.owned_entities.first() { - snapshot.camera_center = leader.position.clone(); - } -} - -fn apply_win_or_fail(snapshot: &mut BigFishRuntimeSnapshot, params: &BigFishRuntimeParams) { - if snapshot.owned_entities.is_empty() { - snapshot.status = BigFishRunStatus::Failed; - snapshot - .event_log - .push("己方实体归零,本局失败".to_string()); - return; - } - if snapshot.player_level >= params.win_level { - snapshot.status = BigFishRunStatus::Won; - snapshot - .event_log - .push("获得最高等级实体,通关".to_string()); - } -} - -fn update_wild_culling( - snapshot: &mut BigFishRuntimeSnapshot, - params: &BigFishRuntimeParams, - step_seconds: f32, -) { - let player_level = snapshot.player_level; - for wild in &mut snapshot.wild_entities { - let should_cull_level = wild.level == player_level - || wild.level >= player_level.saturating_add(3) - || wild.level.saturating_add(3) <= player_level; - if !should_cull_level { - wild.offscreen_seconds = 0.0; - continue; - } - - if is_offscreen(&wild.position, &snapshot.camera_center, wild.radius) { - wild.offscreen_seconds += step_seconds; - } else { - wild.offscreen_seconds = 0.0; - } - } - snapshot - .wild_entities - .retain(|wild| wild.offscreen_seconds < params.offscreen_cull_seconds); -} - -fn maintain_wild_pool(snapshot: &mut BigFishRuntimeSnapshot, params: &BigFishRuntimeParams) { - if snapshot.status != BigFishRunStatus::Running { - return; - } - let mut next_index = snapshot.wild_entities.len() + snapshot.tick as usize; - while snapshot.wild_entities.len() < params.spawn_target_count as usize { - let level = next_spawn_level(snapshot.player_level.max(1), params.win_level, next_index); - snapshot.wild_entities.push(BigFishRuntimeEntity { - entity_id: format!("wild-{}-{}", snapshot.tick, next_index), - level, - position: spawn_position(&snapshot.camera_center, next_index), - radius: entity_radius(level), 
- offscreen_seconds: 0.0, - }); - next_index += 1; - } -} - -fn next_spawn_level(player_level: u32, win_level: u32, index: usize) -> u32 { - if player_level == 1 && index % 4 < 2 { - return 1; - } - let deltas = [-2_i32, -1, 1, 2]; - let delta = deltas[index % deltas.len()]; - (player_level as i32 + delta).clamp(1, win_level as i32) as u32 -} - -fn spawn_position(center: &BigFishVector2, index: usize) -> BigFishVector2 { - let side = index % 4; - let offset = ((index as f32 * 37.0) % 420.0) - 210.0; - match side { - 0 => BigFishVector2 { - x: center.x - BIG_FISH_VIEW_WIDTH * 0.62, - y: center.y + offset, - }, - 1 => BigFishVector2 { - x: center.x + BIG_FISH_VIEW_WIDTH * 0.62, - y: center.y + offset, - }, - 2 => BigFishVector2 { - x: center.x + offset, - y: center.y - BIG_FISH_VIEW_HEIGHT * 0.58, - }, - _ => BigFishVector2 { - x: center.x + offset, - y: center.y + BIG_FISH_VIEW_HEIGHT * 0.58, - }, - } -} - -fn remove_indices(items: &mut Vec, indices: &[usize]) { - let mut sorted = indices.to_vec(); - sorted.sort_unstable(); - sorted.dedup(); - for index in sorted.into_iter().rev() { - if index < items.len() { - items.remove(index); - } - } -} - -fn average_position(indices: &[usize], entities: &[BigFishRuntimeEntity]) -> BigFishVector2 { - let mut x = 0.0; - let mut y = 0.0; - for index in indices { - x += entities[*index].position.x; - y += entities[*index].position.y; - } - let count = indices.len().max(1) as f32; - BigFishVector2 { - x: x / count, - y: y / count, - } -} - -fn distance(left: &BigFishVector2, right: &BigFishVector2) -> f32 { - let dx = left.x - right.x; - let dy = left.y - right.y; - (dx * dx + dy * dy).sqrt() -} - -fn is_offscreen(position: &BigFishVector2, camera: &BigFishVector2, radius: f32) -> bool { - let half_w = BIG_FISH_VIEW_WIDTH / 2.0; - let half_h = BIG_FISH_VIEW_HEIGHT / 2.0; - position.x + radius < camera.x - half_w - || position.x - radius > camera.x + half_w - || position.y + radius < camera.y - half_h - || position.y - radius > 
camera.y + half_h -} - -fn clamp_world(value: f32, horizontal: bool) -> f32 { - let limit = if horizontal { - BIG_FISH_WORLD_HALF_WIDTH - } else { - BIG_FISH_WORLD_HALF_HEIGHT - }; - value.clamp(-limit, limit) -} - -fn entity_radius(level: u32) -> f32 { - 18.0 + level as f32 * 4.0 -} - impl fmt::Display for BigFishFieldError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -1316,11 +810,9 @@ impl fmt::Display for BigFishFieldError { Self::MissingOwnerUserId => f.write_str("big_fish.owner_user_id 不能为空"), Self::MissingMessageId => f.write_str("big_fish.message_id 不能为空"), Self::MissingMessageText => f.write_str("big_fish.message_text 不能为空"), - Self::MissingRunId => f.write_str("big_fish.run_id 不能为空"), Self::MissingDraft => f.write_str("big_fish.draft 尚未编译"), Self::InvalidLevel => f.write_str("big_fish.level 不在合法等级范围内"), Self::InvalidAssetKind => f.write_str("big_fish.asset_kind 或动作位非法"), - Self::InvalidRunState => f.write_str("big_fish.run 当前状态不允许推进"), } } } @@ -1370,123 +862,4 @@ mod tests { assert!(coverage.blockers.iter().any(|item| item.contains("背景图"))); } - #[test] - fn same_level_wild_entity_can_be_collected_at_start() { - let draft = compile_default_draft(&infer_anchor_pack("深海", None)); - let mut snapshot = - build_initial_runtime_snapshot("run-1".to_string(), "session-1".to_string(), &draft, 1); - snapshot.wild_entities[0].position = BigFishVector2 { x: 1.0, y: 0.0 }; - - let next = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 2); - - assert!(next.owned_entities.len() >= 2); - assert!( - next.event_log - .iter() - .any(|event| event.contains("收编 1 级实体")) - ); - } - - #[test] - fn three_owned_entities_merge_into_next_level() { - let draft = compile_default_draft(&infer_anchor_pack("深海", None)); - let mut snapshot = build_initial_runtime_snapshot( - "run-merge".to_string(), - "session-merge".to_string(), - &draft, - 1, - ); - snapshot.wild_entities.clear(); - snapshot.owned_entities.push(BigFishRuntimeEntity { 
- entity_id: "owned-2".to_string(), - level: 1, - position: BigFishVector2 { x: 4.0, y: 0.0 }, - radius: entity_radius(1), - offscreen_seconds: 0.0, - }); - snapshot.owned_entities.push(BigFishRuntimeEntity { - entity_id: "owned-3".to_string(), - level: 1, - position: BigFishVector2 { x: 8.0, y: 0.0 }, - radius: entity_radius(1), - offscreen_seconds: 0.0, - }); - - let next = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 2); - - assert!(next.owned_entities.iter().any(|entity| entity.level == 2)); - } - - #[test] - fn final_level_immediately_wins() { - let draft = compile_default_draft(&infer_anchor_pack("深海", None)); - let mut snapshot = build_initial_runtime_snapshot( - "run-win".to_string(), - "session-win".to_string(), - &draft, - 1, - ); - snapshot.owned_entities[0].level = draft.runtime_params.win_level; - - let next = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 2); - - assert_eq!(next.status, BigFishRunStatus::Won); - } - - #[test] - fn offscreen_same_level_wild_entity_is_removed_after_three_seconds() { - let draft = compile_default_draft(&infer_anchor_pack("深海", None)); - let mut snapshot = build_initial_runtime_snapshot( - "run-cull".to_string(), - "session-cull".to_string(), - &draft, - 1, - ); - snapshot.wild_entities.clear(); - snapshot.wild_entities.push(BigFishRuntimeEntity { - entity_id: "wild-cull".to_string(), - level: 1, - position: BigFishVector2 { x: 1000.0, y: 0.0 }, - radius: entity_radius(1), - offscreen_seconds: 2.8, - }); - snapshot.updated_at_micros = 1_000_000; - - let next = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 1_250_000); - - assert!( - !next - .wild_entities - .iter() - .any(|entity| entity.entity_id == "wild-cull") - ); - } - - #[test] - fn offscreen_same_level_wild_entity_is_kept_before_three_seconds_elapsed() { - let draft = compile_default_draft(&infer_anchor_pack("深海", None)); - let mut snapshot = build_initial_runtime_snapshot( - 
"run-cull-safe".to_string(), - "session-cull-safe".to_string(), - &draft, - 1, - ); - snapshot.wild_entities.clear(); - snapshot.wild_entities.push(BigFishRuntimeEntity { - entity_id: "wild-cull-safe".to_string(), - level: 1, - position: BigFishVector2 { x: 1000.0, y: 0.0 }, - radius: entity_radius(1), - offscreen_seconds: 2.7, - }); - snapshot.updated_at_micros = 1_000_000; - - let next = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 1_200_000); - - assert!( - next.wild_entities - .iter() - .any(|entity| entity.entity_id == "wild-cull-safe") - ); - } } diff --git a/server-rs/crates/shared-contracts/src/big_fish.rs b/server-rs/crates/shared-contracts/src/big_fish.rs index 282c163e..49a69dca 100644 --- a/server-rs/crates/shared-contracts/src/big_fish.rs +++ b/server-rs/crates/shared-contracts/src/big_fish.rs @@ -26,13 +26,6 @@ pub struct ExecuteBigFishActionRequest { pub motion_key: Option, } -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct SubmitBigFishInputRequest { - pub x: f32, - pub y: f32, -} - #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct BigFishAnchorItemResponse { @@ -169,47 +162,6 @@ pub struct BigFishActionResponse { pub session: BigFishSessionSnapshotResponse, } -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct BigFishVector2Response { - pub x: f32, - pub y: f32, -} - -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct BigFishRuntimeEntityResponse { - pub entity_id: String, - pub level: u32, - pub position: BigFishVector2Response, - pub radius: f32, - pub offscreen_seconds: f32, -} - -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct BigFishRuntimeSnapshotResponse { - pub run_id: String, - pub session_id: String, - pub status: String, - pub 
tick: u64, - pub player_level: u32, - pub win_level: u32, - pub leader_entity_id: Option, - pub owned_entities: Vec, - pub wild_entities: Vec, - pub camera_center: BigFishVector2Response, - pub last_input: BigFishVector2Response, - pub event_log: Vec, - pub updated_at: String, -} - -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct BigFishRunResponse { - pub run: BigFishRuntimeSnapshotResponse, -} - #[cfg(test)] mod tests { use super::*; diff --git a/server-rs/crates/spacetime-client/src/big_fish.rs b/server-rs/crates/spacetime-client/src/big_fish.rs index 168d9c48..01311d6b 100644 --- a/server-rs/crates/spacetime-client/src/big_fish.rs +++ b/server-rs/crates/spacetime-client/src/big_fish.rs @@ -251,76 +251,4 @@ impl SpacetimeClient { .await } - pub async fn start_big_fish_run( - &self, - input: BigFishRunStartRecordInput, - ) -> Result { - let procedure_input = BigFishRunStartInput { - run_id: input.run_id, - session_id: input.session_id, - owner_user_id: input.owner_user_id, - started_at_micros: input.started_at_micros, - }; - - self.call_after_connect(move |connection, sender| { - connection - .procedures() - .start_big_fish_run_then(procedure_input, move |_, result| { - let mapped = result - .map_err(|error| SpacetimeClientError::Procedure(error.to_string())) - .and_then(map_big_fish_run_procedure_result); - send_once(&sender, mapped); - }); - }) - .await - } - - pub async fn submit_big_fish_input( - &self, - input: BigFishRunInputSubmitRecordInput, - ) -> Result { - let procedure_input = BigFishRunInputSubmitInput { - run_id: input.run_id, - owner_user_id: input.owner_user_id, - input_x: input.input_x, - input_y: input.input_y, - submitted_at_micros: input.submitted_at_micros, - }; - - self.call_after_connect(move |connection, sender| { - connection.procedures().submit_big_fish_input_then( - procedure_input, - move |_, result| { - let mapped = result - .map_err(|error| 
SpacetimeClientError::Procedure(error.to_string())) - .and_then(map_big_fish_run_procedure_result); - send_once(&sender, mapped); - }, - ); - }) - .await - } - - pub async fn get_big_fish_run( - &self, - run_id: String, - owner_user_id: String, - ) -> Result { - let procedure_input = BigFishRunGetInput { - run_id, - owner_user_id, - }; - - self.call_after_connect(move |connection, sender| { - connection - .procedures() - .get_big_fish_run_then(procedure_input, move |_, result| { - let mapped = result - .map_err(|error| SpacetimeClientError::Procedure(error.to_string())) - .and_then(map_big_fish_run_procedure_result); - send_once(&sender, mapped); - }); - }) - .await - } } diff --git a/server-rs/crates/spacetime-client/src/lib.rs b/server-rs/crates/spacetime-client/src/lib.rs index a2379f1f..cdcf785e 100644 --- a/server-rs/crates/spacetime-client/src/lib.rs +++ b/server-rs/crates/spacetime-client/src/lib.rs @@ -10,10 +10,8 @@ pub use mapper::{ BigFishAnchorPackRecord, BigFishAssetCoverageRecord, BigFishAssetGenerateRecordInput, BigFishAssetSlotRecord, BigFishBackgroundBlueprintRecord, BigFishGameDraftRecord, BigFishLevelBlueprintRecord, BigFishMessageFinalizeRecordInput, - BigFishMessageSubmitRecordInput, BigFishRunInputSubmitRecordInput, BigFishRunStartRecordInput, - BigFishRuntimeEntityRecord, BigFishRuntimeParamsRecord, BigFishRuntimeRecord, - BigFishSessionCreateRecordInput, BigFishSessionRecord, BigFishVector2Record, - BigFishWorkSummaryRecord, CustomWorldAgentActionExecuteRecord, + BigFishMessageSubmitRecordInput, BigFishRuntimeParamsRecord, BigFishSessionCreateRecordInput, + BigFishSessionRecord, BigFishWorkSummaryRecord, CustomWorldAgentActionExecuteRecord, CustomWorldAgentActionExecuteRecordInput, CustomWorldAgentCheckpointRecord, CustomWorldAgentMessageFinalizeRecordInput, CustomWorldAgentMessageRecord, CustomWorldAgentMessageSubmitRecordInput, CustomWorldAgentOperationProgressRecordInput, diff --git a/server-rs/crates/spacetime-client/src/mapper.rs 
b/server-rs/crates/spacetime-client/src/mapper.rs index fc57f93a..8c423c72 100644 --- a/server-rs/crates/spacetime-client/src/mapper.rs +++ b/server-rs/crates/spacetime-client/src/mapper.rs @@ -1262,26 +1262,6 @@ pub(crate) fn map_big_fish_works_procedure_result( }) } -pub(crate) fn map_big_fish_run_procedure_result( - result: BigFishRunProcedureResult, -) -> Result { - if !result.ok { - return Err(SpacetimeClientError::Procedure( - result - .error_message - .unwrap_or_else(|| "SpacetimeDB procedure 返回未知错误".to_string()), - )); - } - - let run = result.run.ok_or_else(|| { - SpacetimeClientError::Procedure( - "SpacetimeDB procedure 未返回 big fish runtime 快照".to_string(), - ) - })?; - - Ok(map_big_fish_runtime_snapshot(run)) -} - pub(crate) fn map_story_session_procedure_result( result: StorySessionProcedureResult, ) -> Result { @@ -2468,53 +2448,6 @@ pub(crate) fn map_big_fish_agent_message_snapshot( } } -pub(crate) fn map_big_fish_runtime_snapshot( - snapshot: BigFishRuntimeSnapshot, -) -> BigFishRuntimeRecord { - BigFishRuntimeRecord { - run_id: snapshot.run_id, - session_id: snapshot.session_id, - status: format_big_fish_run_status(snapshot.status).to_string(), - tick: snapshot.tick, - player_level: snapshot.player_level, - win_level: snapshot.win_level, - leader_entity_id: snapshot.leader_entity_id, - owned_entities: snapshot - .owned_entities - .into_iter() - .map(map_big_fish_runtime_entity) - .collect(), - wild_entities: snapshot - .wild_entities - .into_iter() - .map(map_big_fish_runtime_entity) - .collect(), - camera_center: map_big_fish_vector2(snapshot.camera_center), - last_input: map_big_fish_vector2(snapshot.last_input), - event_log: snapshot.event_log, - updated_at: format_timestamp_micros(snapshot.updated_at_micros), - } -} - -pub(crate) fn map_big_fish_runtime_entity( - snapshot: BigFishRuntimeEntity, -) -> BigFishRuntimeEntityRecord { - BigFishRuntimeEntityRecord { - entity_id: snapshot.entity_id, - level: snapshot.level, - position: 
map_big_fish_vector2(snapshot.position), - radius: snapshot.radius, - offscreen_seconds: snapshot.offscreen_seconds, - } -} - -pub(crate) fn map_big_fish_vector2(snapshot: BigFishVector2) -> BigFishVector2Record { - BigFishVector2Record { - x: snapshot.x, - y: snapshot.y, - } -} - pub(crate) fn map_story_session_snapshot(snapshot: StorySessionSnapshot) -> StorySessionRecord { StorySessionRecord { story_session_id: snapshot.story_session_id, @@ -3220,14 +3153,6 @@ pub(crate) fn format_big_fish_asset_status(value: BigFishAssetStatus) -> &'stati } } -pub(crate) fn format_big_fish_run_status(value: BigFishRunStatus) -> &'static str { - match value { - BigFishRunStatus::Running => "running", - BigFishRunStatus::Won => "won", - BigFishRunStatus::Failed => "failed", - } -} - pub(crate) fn format_custom_world_theme_mode(value: DomainCustomWorldThemeMode) -> &'static str { match value { DomainCustomWorldThemeMode::Martial => "martial", @@ -4455,23 +4380,6 @@ pub struct BigFishAssetGenerateRecordInput { pub generated_at_micros: i64, } -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct BigFishRunStartRecordInput { - pub run_id: String, - pub session_id: String, - pub owner_user_id: String, - pub started_at_micros: i64, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct BigFishRunInputSubmitRecordInput { - pub run_id: String, - pub owner_user_id: String, - pub input_x: f32, - pub input_y: f32, - pub submitted_at_micros: i64, -} - #[derive(Clone, Debug, PartialEq, Eq)] pub struct BigFishAnchorItemRecord { pub key: String, @@ -4604,38 +4512,6 @@ pub struct BigFishWorkSummaryRecord { pub background_ready: bool, } -#[derive(Clone, Debug, PartialEq)] -pub struct BigFishVector2Record { - pub x: f32, - pub y: f32, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct BigFishRuntimeEntityRecord { - pub entity_id: String, - pub level: u32, - pub position: BigFishVector2Record, - pub radius: f32, - pub offscreen_seconds: f32, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct 
BigFishRuntimeRecord { - pub run_id: String, - pub session_id: String, - pub status: String, - pub tick: u64, - pub player_level: u32, - pub win_level: u32, - pub leader_entity_id: Option, - pub owned_entities: Vec, - pub wild_entities: Vec, - pub camera_center: BigFishVector2Record, - pub last_input: BigFishVector2Record, - pub event_log: Vec, - pub updated_at: String, -} - #[derive(Clone, Debug, PartialEq, Eq)] pub struct ResolveNpcBattleInteractionInput { pub npc_interaction: DomainResolveNpcInteractionInput, diff --git a/server-rs/crates/spacetime-client/src/module_bindings/authorize_database_migration_operator_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/authorize_database_migration_operator_procedure.rs new file mode 100644 index 00000000..b5885022 --- /dev/null +++ b/server-rs/crates/spacetime-client/src/module_bindings/authorize_database_migration_operator_procedure.rs @@ -0,0 +1,62 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::database_migration_authorize_operator_input_type::DatabaseMigrationAuthorizeOperatorInput; +use super::database_migration_operator_procedure_result_type::DatabaseMigrationOperatorProcedureResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct AuthorizeDatabaseMigrationOperatorArgs { + pub input: DatabaseMigrationAuthorizeOperatorInput, +} + +impl __sdk::InModule for AuthorizeDatabaseMigrationOperatorArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `authorize_database_migration_operator`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait authorize_database_migration_operator { + fn authorize_database_migration_operator( + &self, + input: DatabaseMigrationAuthorizeOperatorInput, + ) { + self.authorize_database_migration_operator_then(input, |_, _| {}); + } + + fn authorize_database_migration_operator_then( + &self, + input: DatabaseMigrationAuthorizeOperatorInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ); +} + +impl authorize_database_migration_operator for super::RemoteProcedures { + fn authorize_database_migration_operator_then( + &self, + input: DatabaseMigrationAuthorizeOperatorInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, DatabaseMigrationOperatorProcedureResult>( + "authorize_database_migration_operator", + AuthorizeDatabaseMigrationOperatorArgs { input }, + __callback, + ); + } +} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_input_submit_input_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_input_submit_input_type.rs deleted file mode 100644 index f3175d40..00000000 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_input_submit_input_type.rs +++ /dev/null @@ -1,19 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -pub struct BigFishRunInputSubmitInput { - pub run_id: String, - pub owner_user_id: String, - pub input_x: f32, - pub input_y: f32, - pub submitted_at_micros: i64, -} - -impl __sdk::InModule for BigFishRunInputSubmitInput { - type Module = super::RemoteModule; -} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_run_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_run_type.rs deleted file mode 100644 index 01852b9c..00000000 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_run_type.rs +++ /dev/null @@ -1,82 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::big_fish_run_status_type::BigFishRunStatus; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -pub struct BigFishRuntimeRun { - pub run_id: String, - pub session_id: String, - pub owner_user_id: String, - pub status: BigFishRunStatus, - pub snapshot_json: String, - pub last_input_x: f32, - pub last_input_y: f32, - pub tick: u64, - pub created_at: __sdk::Timestamp, - pub updated_at: __sdk::Timestamp, -} - -impl __sdk::InModule for BigFishRuntimeRun { - type Module = super::RemoteModule; -} - -/// Column accessor struct for the table `BigFishRuntimeRun`. -/// -/// Provides typed access to columns for query building. 
-pub struct BigFishRuntimeRunCols { - pub run_id: __sdk::__query_builder::Col, - pub session_id: __sdk::__query_builder::Col, - pub owner_user_id: __sdk::__query_builder::Col, - pub status: __sdk::__query_builder::Col, - pub snapshot_json: __sdk::__query_builder::Col, - pub last_input_x: __sdk::__query_builder::Col, - pub last_input_y: __sdk::__query_builder::Col, - pub tick: __sdk::__query_builder::Col, - pub created_at: __sdk::__query_builder::Col, - pub updated_at: __sdk::__query_builder::Col, -} - -impl __sdk::__query_builder::HasCols for BigFishRuntimeRun { - type Cols = BigFishRuntimeRunCols; - fn cols(table_name: &'static str) -> Self::Cols { - BigFishRuntimeRunCols { - run_id: __sdk::__query_builder::Col::new(table_name, "run_id"), - session_id: __sdk::__query_builder::Col::new(table_name, "session_id"), - owner_user_id: __sdk::__query_builder::Col::new(table_name, "owner_user_id"), - status: __sdk::__query_builder::Col::new(table_name, "status"), - snapshot_json: __sdk::__query_builder::Col::new(table_name, "snapshot_json"), - last_input_x: __sdk::__query_builder::Col::new(table_name, "last_input_x"), - last_input_y: __sdk::__query_builder::Col::new(table_name, "last_input_y"), - tick: __sdk::__query_builder::Col::new(table_name, "tick"), - created_at: __sdk::__query_builder::Col::new(table_name, "created_at"), - updated_at: __sdk::__query_builder::Col::new(table_name, "updated_at"), - } - } -} - -/// Indexed column accessor struct for the table `BigFishRuntimeRun`. -/// -/// Provides typed access to indexed columns for query building. 
-pub struct BigFishRuntimeRunIxCols { - pub owner_user_id: __sdk::__query_builder::IxCol, - pub run_id: __sdk::__query_builder::IxCol, - pub session_id: __sdk::__query_builder::IxCol, -} - -impl __sdk::__query_builder::HasIxCols for BigFishRuntimeRun { - type IxCols = BigFishRuntimeRunIxCols; - fn ix_cols(table_name: &'static str) -> Self::IxCols { - BigFishRuntimeRunIxCols { - owner_user_id: __sdk::__query_builder::IxCol::new(table_name, "owner_user_id"), - run_id: __sdk::__query_builder::IxCol::new(table_name, "run_id"), - session_id: __sdk::__query_builder::IxCol::new(table_name, "session_id"), - } - } -} - -impl __sdk::__query_builder::CanBeLookupTable for BigFishRuntimeRun {} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_snapshot_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_snapshot_type.rs deleted file mode 100644 index 32f2a80c..00000000 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_snapshot_type.rs +++ /dev/null @@ -1,31 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::big_fish_run_status_type::BigFishRunStatus; -use super::big_fish_runtime_entity_type::BigFishRuntimeEntity; -use super::big_fish_vector_2_type::BigFishVector2; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -pub struct BigFishRuntimeSnapshot { - pub run_id: String, - pub session_id: String, - pub status: BigFishRunStatus, - pub tick: u64, - pub player_level: u32, - pub win_level: u32, - pub leader_entity_id: Option, - pub owned_entities: Vec, - pub wild_entities: Vec, - pub camera_center: BigFishVector2, - pub last_input: BigFishVector2, - pub event_log: Vec, - pub updated_at_micros: i64, -} - -impl __sdk::InModule for BigFishRuntimeSnapshot { - type Module = super::RemoteModule; -} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_status_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_authorize_operator_input_type.rs similarity index 64% rename from server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_status_type.rs rename to server-rs/crates/spacetime-client/src/module_bindings/database_migration_authorize_operator_input_type.rs index 6bb94f36..309de81f 100644 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_status_type.rs +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_authorize_operator_input_type.rs @@ -6,15 +6,12 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -#[derive(Copy, Eq, Hash)] -pub enum BigFishRunStatus { - Running, - - Won, - - Failed, +pub struct DatabaseMigrationAuthorizeOperatorInput { + pub bootstrap_secret: String, + pub operator_identity_hex: String, + pub note: String, } -impl __sdk::InModule for 
BigFishRunStatus { +impl __sdk::InModule for DatabaseMigrationAuthorizeOperatorInput { type Module = super::RemoteModule; } diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_get_input_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_export_input_type.rs similarity index 74% rename from server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_get_input_type.rs rename to server-rs/crates/spacetime-client/src/module_bindings/database_migration_export_input_type.rs index 8e0d0d8d..39381901 100644 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_get_input_type.rs +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_export_input_type.rs @@ -6,11 +6,10 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub struct BigFishRunGetInput { - pub run_id: String, - pub owner_user_id: String, +pub struct DatabaseMigrationExportInput { + pub include_tables: Vec, } -impl __sdk::InModule for BigFishRunGetInput { +impl __sdk::InModule for DatabaseMigrationExportInput { type Module = super::RemoteModule; } diff --git a/server-rs/crates/spacetime-client/src/module_bindings/database_migration_import_input_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_import_input_type.rs new file mode 100644 index 00000000..6bdbcb91 --- /dev/null +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_import_input_type.rs @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DatabaseMigrationImportInput { + pub migration_json: String, + pub include_tables: Vec, + pub replace_existing: bool, + pub dry_run: bool, +} + +impl __sdk::InModule for DatabaseMigrationImportInput { + type Module = super::RemoteModule; +} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_procedure_result_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_operator_procedure_result_type.rs similarity index 68% rename from server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_procedure_result_type.rs rename to server-rs/crates/spacetime-client/src/module_bindings/database_migration_operator_procedure_result_type.rs index 86d73fc2..0c7e3e65 100644 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_procedure_result_type.rs +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_operator_procedure_result_type.rs @@ -4,16 +4,14 @@ #![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; -use super::big_fish_runtime_snapshot_type::BigFishRuntimeSnapshot; - #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub struct BigFishRunProcedureResult { +pub struct DatabaseMigrationOperatorProcedureResult { pub ok: bool, - pub run: Option, + pub operator_identity_hex: Option, pub error_message: Option, } -impl __sdk::InModule for BigFishRunProcedureResult { +impl __sdk::InModule for DatabaseMigrationOperatorProcedureResult { type Module = super::RemoteModule; } diff --git a/server-rs/crates/spacetime-client/src/module_bindings/database_migration_operator_type.rs 
b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_operator_type.rs new file mode 100644 index 00000000..7f8b0b6e --- /dev/null +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_operator_type.rs @@ -0,0 +1,59 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DatabaseMigrationOperator { + pub operator_identity: __sdk::Identity, + pub created_at: __sdk::Timestamp, + pub created_by: __sdk::Identity, + pub note: String, +} + +impl __sdk::InModule for DatabaseMigrationOperator { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `DatabaseMigrationOperator`. +/// +/// Provides typed access to columns for query building. +pub struct DatabaseMigrationOperatorCols { + pub operator_identity: __sdk::__query_builder::Col, + pub created_at: __sdk::__query_builder::Col, + pub created_by: __sdk::__query_builder::Col, + pub note: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for DatabaseMigrationOperator { + type Cols = DatabaseMigrationOperatorCols; + fn cols(table_name: &'static str) -> Self::Cols { + DatabaseMigrationOperatorCols { + operator_identity: __sdk::__query_builder::Col::new(table_name, "operator_identity"), + created_at: __sdk::__query_builder::Col::new(table_name, "created_at"), + created_by: __sdk::__query_builder::Col::new(table_name, "created_by"), + note: __sdk::__query_builder::Col::new(table_name, "note"), + } + } +} + +/// Indexed column accessor struct for the table `DatabaseMigrationOperator`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct DatabaseMigrationOperatorIxCols { + pub operator_identity: + __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for DatabaseMigrationOperator { + type IxCols = DatabaseMigrationOperatorIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + DatabaseMigrationOperatorIxCols { + operator_identity: __sdk::__query_builder::IxCol::new(table_name, "operator_identity"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for DatabaseMigrationOperator {} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_entity_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_procedure_result_type.rs similarity index 52% rename from server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_entity_type.rs rename to server-rs/crates/spacetime-client/src/module_bindings/database_migration_procedure_result_type.rs index ee7c160c..a3869c5d 100644 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_runtime_entity_type.rs +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_procedure_result_type.rs @@ -4,18 +4,18 @@ #![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; -use super::big_fish_vector_2_type::BigFishVector2; +use super::database_migration_table_stat_type::DatabaseMigrationTableStat; #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub struct BigFishRuntimeEntity { - pub entity_id: String, - pub level: u32, - pub position: BigFishVector2, - pub radius: f32, - pub offscreen_seconds: f32, +pub struct DatabaseMigrationProcedureResult { + pub ok: bool, + pub schema_version: u32, + pub migration_json: Option, + pub table_stats: Vec, + pub error_message: Option, } -impl __sdk::InModule for BigFishRuntimeEntity { +impl __sdk::InModule for DatabaseMigrationProcedureResult { type Module = super::RemoteModule; } diff 
--git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_vector_2_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_revoke_operator_input_type.rs similarity index 72% rename from server-rs/crates/spacetime-client/src/module_bindings/big_fish_vector_2_type.rs rename to server-rs/crates/spacetime-client/src/module_bindings/database_migration_revoke_operator_input_type.rs index 745063ad..e550a7a4 100644 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_vector_2_type.rs +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_revoke_operator_input_type.rs @@ -6,11 +6,10 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub struct BigFishVector2 { - pub x: f32, - pub y: f32, +pub struct DatabaseMigrationRevokeOperatorInput { + pub operator_identity_hex: String, } -impl __sdk::InModule for BigFishVector2 { +impl __sdk::InModule for DatabaseMigrationRevokeOperatorInput { type Module = super::RemoteModule; } diff --git a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_start_input_type.rs b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_table_stat_type.rs similarity index 64% rename from server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_start_input_type.rs rename to server-rs/crates/spacetime-client/src/module_bindings/database_migration_table_stat_type.rs index 944fa6da..1257661d 100644 --- a/server-rs/crates/spacetime-client/src/module_bindings/big_fish_run_start_input_type.rs +++ b/server-rs/crates/spacetime-client/src/module_bindings/database_migration_table_stat_type.rs @@ -6,13 +6,13 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub struct BigFishRunStartInput { - 
pub run_id: String, - pub session_id: String, - pub owner_user_id: String, - pub started_at_micros: i64, +pub struct DatabaseMigrationTableStat { + pub table_name: String, + pub exported_row_count: u64, + pub imported_row_count: u64, + pub skipped_row_count: u64, } -impl __sdk::InModule for BigFishRunStartInput { +impl __sdk::InModule for DatabaseMigrationTableStat { type Module = super::RemoteModule; } diff --git a/server-rs/crates/spacetime-client/src/module_bindings/export_database_migration_to_file_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/export_database_migration_to_file_procedure.rs new file mode 100644 index 00000000..3dfe18f8 --- /dev/null +++ b/server-rs/crates/spacetime-client/src/module_bindings/export_database_migration_to_file_procedure.rs @@ -0,0 +1,59 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::database_migration_export_input_type::DatabaseMigrationExportInput; +use super::database_migration_procedure_result_type::DatabaseMigrationProcedureResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct ExportDatabaseMigrationToFileArgs { + pub input: DatabaseMigrationExportInput, +} + +impl __sdk::InModule for ExportDatabaseMigrationToFileArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `export_database_migration_to_file`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait export_database_migration_to_file { + fn export_database_migration_to_file(&self, input: DatabaseMigrationExportInput) { + self.export_database_migration_to_file_then(input, |_, _| {}); + } + + fn export_database_migration_to_file_then( + &self, + input: DatabaseMigrationExportInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ); +} + +impl export_database_migration_to_file for super::RemoteProcedures { + fn export_database_migration_to_file_then( + &self, + input: DatabaseMigrationExportInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, DatabaseMigrationProcedureResult>( + "export_database_migration_to_file", + ExportDatabaseMigrationToFileArgs { input }, + __callback, + ); + } +} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/get_big_fish_run_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/get_big_fish_run_procedure.rs deleted file mode 100644 index 867a6759..00000000 --- a/server-rs/crates/spacetime-client/src/module_bindings/get_big_fish_run_procedure.rs +++ /dev/null @@ -1,59 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::big_fish_run_get_input_type::BigFishRunGetInput; -use super::big_fish_run_procedure_result_type::BigFishRunProcedureResult; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct GetBigFishRunArgs { - pub input: BigFishRunGetInput, -} - -impl __sdk::InModule for GetBigFishRunArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `get_big_fish_run`. 
-/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait get_big_fish_run { - fn get_big_fish_run(&self, input: BigFishRunGetInput) { - self.get_big_fish_run_then(input, |_, _| {}); - } - - fn get_big_fish_run_then( - &self, - input: BigFishRunGetInput, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, - ) + Send - + 'static, - ); -} - -impl get_big_fish_run for super::RemoteProcedures { - fn get_big_fish_run_then( - &self, - input: BigFishRunGetInput, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, - ) + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, BigFishRunProcedureResult>( - "get_big_fish_run", - GetBigFishRunArgs { input }, - __callback, - ); - } -} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_from_file_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_from_file_procedure.rs new file mode 100644 index 00000000..7b2322ee --- /dev/null +++ b/server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_from_file_procedure.rs @@ -0,0 +1,59 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::database_migration_import_input_type::DatabaseMigrationImportInput; +use super::database_migration_procedure_result_type::DatabaseMigrationProcedureResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct ImportDatabaseMigrationFromFileArgs { + pub input: DatabaseMigrationImportInput, +} + +impl __sdk::InModule for ImportDatabaseMigrationFromFileArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `import_database_migration_from_file`. +/// +/// Implemented for [`super::RemoteProcedures`]. +pub trait import_database_migration_from_file { + fn import_database_migration_from_file(&self, input: DatabaseMigrationImportInput) { + self.import_database_migration_from_file_then(input, |_, _| {}); + } + + fn import_database_migration_from_file_then( + &self, + input: DatabaseMigrationImportInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ); +} + +impl import_database_migration_from_file for super::RemoteProcedures { + fn import_database_migration_from_file_then( + &self, + input: DatabaseMigrationImportInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, DatabaseMigrationProcedureResult>( + "import_database_migration_from_file", + ImportDatabaseMigrationFromFileArgs { input }, + __callback, + ); + } +} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_incremental_from_file_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_incremental_from_file_procedure.rs new file mode 100644 index 00000000..2fc31804 --- /dev/null +++ 
b/server-rs/crates/spacetime-client/src/module_bindings/import_database_migration_incremental_from_file_procedure.rs @@ -0,0 +1,59 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::database_migration_import_input_type::DatabaseMigrationImportInput; +use super::database_migration_procedure_result_type::DatabaseMigrationProcedureResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct ImportDatabaseMigrationIncrementalFromFileArgs { + pub input: DatabaseMigrationImportInput, +} + +impl __sdk::InModule for ImportDatabaseMigrationIncrementalFromFileArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `import_database_migration_incremental_from_file`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait import_database_migration_incremental_from_file { + fn import_database_migration_incremental_from_file(&self, input: DatabaseMigrationImportInput) { + self.import_database_migration_incremental_from_file_then(input, |_, _| {}); + } + + fn import_database_migration_incremental_from_file_then( + &self, + input: DatabaseMigrationImportInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ); +} + +impl import_database_migration_incremental_from_file for super::RemoteProcedures { + fn import_database_migration_incremental_from_file_then( + &self, + input: DatabaseMigrationImportInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, DatabaseMigrationProcedureResult>( + "import_database_migration_incremental_from_file", + ImportDatabaseMigrationIncrementalFromFileArgs { input }, + __callback, + ); + } +} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/mod.rs b/server-rs/crates/spacetime-client/src/module_bindings/mod.rs index 62c0f774..adf910a1 100644 --- a/server-rs/crates/spacetime-client/src/module_bindings/mod.rs +++ b/server-rs/crates/spacetime-client/src/module_bindings/mod.rs @@ -58,6 +58,7 @@ pub mod auth_store_snapshot_procedure_result_type; pub mod auth_store_snapshot_record_type; pub mod auth_store_snapshot_type; pub mod auth_store_snapshot_upsert_input_type; +pub mod authorize_database_migration_operator_procedure; pub mod battle_mode_type; pub mod battle_state_input_type; pub mod battle_state_procedure_result_type; @@ -89,20 +90,11 @@ pub mod big_fish_level_blueprint_type; pub mod big_fish_message_finalize_input_type; pub mod big_fish_message_submit_input_type; pub mod big_fish_publish_input_type; -pub mod big_fish_run_get_input_type; -pub mod big_fish_run_input_submit_input_type; -pub mod big_fish_run_procedure_result_type; -pub mod big_fish_run_start_input_type; -pub mod 
big_fish_run_status_type; -pub mod big_fish_runtime_entity_type; pub mod big_fish_runtime_params_type; -pub mod big_fish_runtime_run_type; -pub mod big_fish_runtime_snapshot_type; pub mod big_fish_session_create_input_type; pub mod big_fish_session_get_input_type; pub mod big_fish_session_procedure_result_type; pub mod big_fish_session_snapshot_type; -pub mod big_fish_vector_2_type; pub mod big_fish_work_delete_input_type; pub mod big_fish_works_list_input_type; pub mod big_fish_works_procedure_result_type; @@ -188,6 +180,14 @@ pub mod custom_world_theme_mode_type; pub mod custom_world_work_summary_snapshot_type; pub mod custom_world_works_list_input_type; pub mod custom_world_works_list_result_type; +pub mod database_migration_authorize_operator_input_type; +pub mod database_migration_export_input_type; +pub mod database_migration_import_input_type; +pub mod database_migration_operator_procedure_result_type; +pub mod database_migration_operator_type; +pub mod database_migration_procedure_result_type; +pub mod database_migration_revoke_operator_input_type; +pub mod database_migration_table_stat_type; pub mod delete_big_fish_work_procedure; pub mod delete_custom_world_agent_session_procedure; pub mod delete_custom_world_profile_and_return_procedure; @@ -197,6 +197,7 @@ pub mod drag_puzzle_piece_or_group_procedure; pub mod equip_inventory_item_input_type; pub mod execute_custom_world_agent_action_procedure; pub mod export_auth_store_snapshot_from_tables_procedure; +pub mod export_database_migration_to_file_procedure; pub mod fail_ai_task_and_return_procedure; pub mod finalize_big_fish_agent_message_turn_procedure; pub mod finalize_custom_world_agent_message_turn_procedure; @@ -204,7 +205,6 @@ pub mod finalize_puzzle_agent_message_turn_procedure; pub mod generate_big_fish_asset_procedure; pub mod get_auth_store_snapshot_procedure; pub mod get_battle_state_procedure; -pub mod get_big_fish_run_procedure; pub mod get_big_fish_session_procedure; pub mod 
get_chapter_progression_procedure; pub mod get_custom_world_agent_card_detail_procedure; @@ -230,6 +230,8 @@ pub mod grant_inventory_item_input_type; pub mod grant_player_progression_experience_and_return_procedure; pub mod grant_player_progression_experience_reducer; pub mod import_auth_store_snapshot_procedure; +pub mod import_database_migration_from_file_procedure; +pub mod import_database_migration_incremental_from_file_procedure; pub mod inventory_container_kind_type; pub mod inventory_equipment_slot_type; pub mod inventory_item_rarity_type; @@ -356,6 +358,7 @@ pub mod resolve_npc_social_action_reducer; pub mod resolve_treasure_interaction_and_return_procedure; pub mod resolve_treasure_interaction_reducer; pub mod resume_profile_save_archive_and_return_procedure; +pub mod revoke_database_migration_operator_procedure; pub mod rpg_agent_draft_card_kind_type; pub mod rpg_agent_draft_card_status_type; pub mod rpg_agent_message_kind_type; @@ -425,7 +428,6 @@ pub mod save_puzzle_generated_images_procedure; pub mod select_puzzle_cover_image_procedure; pub mod start_ai_task_reducer; pub mod start_ai_task_stage_reducer; -pub mod start_big_fish_run_procedure; pub mod start_puzzle_run_procedure; pub mod story_continue_input_type; pub mod story_event_kind_type; @@ -438,7 +440,6 @@ pub mod story_session_state_input_type; pub mod story_session_state_procedure_result_type; pub mod story_session_status_type; pub mod story_session_type; -pub mod submit_big_fish_input_procedure; pub mod submit_big_fish_message_procedure; pub mod submit_custom_world_agent_message_procedure; pub mod submit_puzzle_agent_message_procedure; @@ -519,6 +520,7 @@ pub use auth_store_snapshot_procedure_result_type::AuthStoreSnapshotProcedureRes pub use auth_store_snapshot_record_type::AuthStoreSnapshotRecord; pub use auth_store_snapshot_type::AuthStoreSnapshot; pub use auth_store_snapshot_upsert_input_type::AuthStoreSnapshotUpsertInput; +pub use 
authorize_database_migration_operator_procedure::authorize_database_migration_operator; pub use battle_mode_type::BattleMode; pub use battle_state_input_type::BattleStateInput; pub use battle_state_procedure_result_type::BattleStateProcedureResult; @@ -550,20 +552,11 @@ pub use big_fish_level_blueprint_type::BigFishLevelBlueprint; pub use big_fish_message_finalize_input_type::BigFishMessageFinalizeInput; pub use big_fish_message_submit_input_type::BigFishMessageSubmitInput; pub use big_fish_publish_input_type::BigFishPublishInput; -pub use big_fish_run_get_input_type::BigFishRunGetInput; -pub use big_fish_run_input_submit_input_type::BigFishRunInputSubmitInput; -pub use big_fish_run_procedure_result_type::BigFishRunProcedureResult; -pub use big_fish_run_start_input_type::BigFishRunStartInput; -pub use big_fish_run_status_type::BigFishRunStatus; -pub use big_fish_runtime_entity_type::BigFishRuntimeEntity; pub use big_fish_runtime_params_type::BigFishRuntimeParams; -pub use big_fish_runtime_run_type::BigFishRuntimeRun; -pub use big_fish_runtime_snapshot_type::BigFishRuntimeSnapshot; pub use big_fish_session_create_input_type::BigFishSessionCreateInput; pub use big_fish_session_get_input_type::BigFishSessionGetInput; pub use big_fish_session_procedure_result_type::BigFishSessionProcedureResult; pub use big_fish_session_snapshot_type::BigFishSessionSnapshot; -pub use big_fish_vector_2_type::BigFishVector2; pub use big_fish_work_delete_input_type::BigFishWorkDeleteInput; pub use big_fish_works_list_input_type::BigFishWorksListInput; pub use big_fish_works_procedure_result_type::BigFishWorksProcedureResult; @@ -649,6 +642,14 @@ pub use custom_world_theme_mode_type::CustomWorldThemeMode; pub use custom_world_work_summary_snapshot_type::CustomWorldWorkSummarySnapshot; pub use custom_world_works_list_input_type::CustomWorldWorksListInput; pub use custom_world_works_list_result_type::CustomWorldWorksListResult; +pub use 
database_migration_authorize_operator_input_type::DatabaseMigrationAuthorizeOperatorInput; +pub use database_migration_export_input_type::DatabaseMigrationExportInput; +pub use database_migration_import_input_type::DatabaseMigrationImportInput; +pub use database_migration_operator_procedure_result_type::DatabaseMigrationOperatorProcedureResult; +pub use database_migration_operator_type::DatabaseMigrationOperator; +pub use database_migration_procedure_result_type::DatabaseMigrationProcedureResult; +pub use database_migration_revoke_operator_input_type::DatabaseMigrationRevokeOperatorInput; +pub use database_migration_table_stat_type::DatabaseMigrationTableStat; pub use delete_big_fish_work_procedure::delete_big_fish_work; pub use delete_custom_world_agent_session_procedure::delete_custom_world_agent_session; pub use delete_custom_world_profile_and_return_procedure::delete_custom_world_profile_and_return; @@ -658,6 +659,7 @@ pub use drag_puzzle_piece_or_group_procedure::drag_puzzle_piece_or_group; pub use equip_inventory_item_input_type::EquipInventoryItemInput; pub use execute_custom_world_agent_action_procedure::execute_custom_world_agent_action; pub use export_auth_store_snapshot_from_tables_procedure::export_auth_store_snapshot_from_tables; +pub use export_database_migration_to_file_procedure::export_database_migration_to_file; pub use fail_ai_task_and_return_procedure::fail_ai_task_and_return; pub use finalize_big_fish_agent_message_turn_procedure::finalize_big_fish_agent_message_turn; pub use finalize_custom_world_agent_message_turn_procedure::finalize_custom_world_agent_message_turn; @@ -665,7 +667,6 @@ pub use finalize_puzzle_agent_message_turn_procedure::finalize_puzzle_agent_mess pub use generate_big_fish_asset_procedure::generate_big_fish_asset; pub use get_auth_store_snapshot_procedure::get_auth_store_snapshot; pub use get_battle_state_procedure::get_battle_state; -pub use get_big_fish_run_procedure::get_big_fish_run; pub use 
get_big_fish_session_procedure::get_big_fish_session; pub use get_chapter_progression_procedure::get_chapter_progression; pub use get_custom_world_agent_card_detail_procedure::get_custom_world_agent_card_detail; @@ -691,6 +692,8 @@ pub use grant_inventory_item_input_type::GrantInventoryItemInput; pub use grant_player_progression_experience_and_return_procedure::grant_player_progression_experience_and_return; pub use grant_player_progression_experience_reducer::grant_player_progression_experience; pub use import_auth_store_snapshot_procedure::import_auth_store_snapshot; +pub use import_database_migration_from_file_procedure::import_database_migration_from_file; +pub use import_database_migration_incremental_from_file_procedure::import_database_migration_incremental_from_file; pub use inventory_container_kind_type::InventoryContainerKind; pub use inventory_equipment_slot_type::InventoryEquipmentSlot; pub use inventory_item_rarity_type::InventoryItemRarity; @@ -817,6 +820,7 @@ pub use resolve_npc_social_action_reducer::resolve_npc_social_action; pub use resolve_treasure_interaction_and_return_procedure::resolve_treasure_interaction_and_return; pub use resolve_treasure_interaction_reducer::resolve_treasure_interaction; pub use resume_profile_save_archive_and_return_procedure::resume_profile_save_archive_and_return; +pub use revoke_database_migration_operator_procedure::revoke_database_migration_operator; pub use rpg_agent_draft_card_kind_type::RpgAgentDraftCardKind; pub use rpg_agent_draft_card_status_type::RpgAgentDraftCardStatus; pub use rpg_agent_message_kind_type::RpgAgentMessageKind; @@ -886,7 +890,6 @@ pub use save_puzzle_generated_images_procedure::save_puzzle_generated_images; pub use select_puzzle_cover_image_procedure::select_puzzle_cover_image; pub use start_ai_task_reducer::start_ai_task; pub use start_ai_task_stage_reducer::start_ai_task_stage; -pub use start_big_fish_run_procedure::start_big_fish_run; pub use start_puzzle_run_procedure::start_puzzle_run; 
pub use story_continue_input_type::StoryContinueInput; pub use story_event_kind_type::StoryEventKind; @@ -899,7 +902,6 @@ pub use story_session_state_input_type::StorySessionStateInput; pub use story_session_state_procedure_result_type::StorySessionStateProcedureResult; pub use story_session_status_type::StorySessionStatus; pub use story_session_type::StorySession; -pub use submit_big_fish_input_procedure::submit_big_fish_input; pub use submit_big_fish_message_procedure::submit_big_fish_message; pub use submit_custom_world_agent_message_procedure::submit_custom_world_agent_message; pub use submit_puzzle_agent_message_procedure::submit_puzzle_agent_message; diff --git a/server-rs/crates/spacetime-client/src/module_bindings/revoke_database_migration_operator_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/revoke_database_migration_operator_procedure.rs new file mode 100644 index 00000000..feb5086e --- /dev/null +++ b/server-rs/crates/spacetime-client/src/module_bindings/revoke_database_migration_operator_procedure.rs @@ -0,0 +1,59 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::database_migration_operator_procedure_result_type::DatabaseMigrationOperatorProcedureResult; +use super::database_migration_revoke_operator_input_type::DatabaseMigrationRevokeOperatorInput; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct RevokeDatabaseMigrationOperatorArgs { + pub input: DatabaseMigrationRevokeOperatorInput, +} + +impl __sdk::InModule for RevokeDatabaseMigrationOperatorArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `revoke_database_migration_operator`. 
+/// +/// Implemented for [`super::RemoteProcedures`]. +pub trait revoke_database_migration_operator { + fn revoke_database_migration_operator(&self, input: DatabaseMigrationRevokeOperatorInput) { + self.revoke_database_migration_operator_then(input, |_, _| {}); + } + + fn revoke_database_migration_operator_then( + &self, + input: DatabaseMigrationRevokeOperatorInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ); +} + +impl revoke_database_migration_operator for super::RemoteProcedures { + fn revoke_database_migration_operator_then( + &self, + input: DatabaseMigrationRevokeOperatorInput, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, + ) + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, DatabaseMigrationOperatorProcedureResult>( + "revoke_database_migration_operator", + RevokeDatabaseMigrationOperatorArgs { input }, + __callback, + ); + } +} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/start_big_fish_run_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/start_big_fish_run_procedure.rs deleted file mode 100644 index 7f3713ab..00000000 --- a/server-rs/crates/spacetime-client/src/module_bindings/start_big_fish_run_procedure.rs +++ /dev/null @@ -1,59 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::big_fish_run_procedure_result_type::BigFishRunProcedureResult; -use super::big_fish_run_start_input_type::BigFishRunStartInput; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct StartBigFishRunArgs { - pub input: BigFishRunStartInput, -} - -impl __sdk::InModule for StartBigFishRunArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `start_big_fish_run`. -/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait start_big_fish_run { - fn start_big_fish_run(&self, input: BigFishRunStartInput) { - self.start_big_fish_run_then(input, |_, _| {}); - } - - fn start_big_fish_run_then( - &self, - input: BigFishRunStartInput, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, - ) + Send - + 'static, - ); -} - -impl start_big_fish_run for super::RemoteProcedures { - fn start_big_fish_run_then( - &self, - input: BigFishRunStartInput, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, - ) + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, BigFishRunProcedureResult>( - "start_big_fish_run", - StartBigFishRunArgs { input }, - __callback, - ); - } -} diff --git a/server-rs/crates/spacetime-client/src/module_bindings/submit_big_fish_input_procedure.rs b/server-rs/crates/spacetime-client/src/module_bindings/submit_big_fish_input_procedure.rs deleted file mode 100644 index 21e8f82d..00000000 --- a/server-rs/crates/spacetime-client/src/module_bindings/submit_big_fish_input_procedure.rs +++ /dev/null @@ -1,59 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::big_fish_run_input_submit_input_type::BigFishRunInputSubmitInput; -use super::big_fish_run_procedure_result_type::BigFishRunProcedureResult; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct SubmitBigFishInputArgs { - pub input: BigFishRunInputSubmitInput, -} - -impl __sdk::InModule for SubmitBigFishInputArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `submit_big_fish_input`. -/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait submit_big_fish_input { - fn submit_big_fish_input(&self, input: BigFishRunInputSubmitInput) { - self.submit_big_fish_input_then(input, |_, _| {}); - } - - fn submit_big_fish_input_then( - &self, - input: BigFishRunInputSubmitInput, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, - ) + Send - + 'static, - ); -} - -impl submit_big_fish_input for super::RemoteProcedures { - fn submit_big_fish_input_then( - &self, - input: BigFishRunInputSubmitInput, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, - ) + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, BigFishRunProcedureResult>( - "submit_big_fish_input", - SubmitBigFishInputArgs { input }, - __callback, - ); - } -} diff --git a/server-rs/crates/spacetime-module/src/big_fish/mod.rs b/server-rs/crates/spacetime-module/src/big_fish/mod.rs index 968034c3..8478f897 100644 --- a/server-rs/crates/spacetime-module/src/big_fish/mod.rs +++ b/server-rs/crates/spacetime-module/src/big_fish/mod.rs @@ -1,9 +1,7 @@ mod assets; -mod runtime; mod session; mod tables; pub use assets::*; -pub use runtime::*; pub use session::*; pub use tables::*; diff --git a/server-rs/crates/spacetime-module/src/big_fish/runtime.rs 
b/server-rs/crates/spacetime-module/src/big_fish/runtime.rs deleted file mode 100644 index d794b653..00000000 --- a/server-rs/crates/spacetime-module/src/big_fish/runtime.rs +++ /dev/null @@ -1,198 +0,0 @@ -use crate::big_fish::tables::{big_fish_creation_session, big_fish_runtime_run}; -use crate::*; - -#[spacetimedb::procedure] -pub fn start_big_fish_run( - ctx: &mut ProcedureContext, - input: BigFishRunStartInput, -) -> BigFishRunProcedureResult { - match ctx.try_with_tx(|tx| start_big_fish_run_tx(tx, input.clone())) { - Ok(run) => BigFishRunProcedureResult { - ok: true, - run: Some(run), - error_message: None, - }, - Err(message) => BigFishRunProcedureResult { - ok: false, - run: None, - error_message: Some(message), - }, - } -} - -#[spacetimedb::procedure] -pub fn submit_big_fish_input( - ctx: &mut ProcedureContext, - input: BigFishRunInputSubmitInput, -) -> BigFishRunProcedureResult { - match ctx.try_with_tx(|tx| submit_big_fish_input_tx(tx, input.clone())) { - Ok(run) => BigFishRunProcedureResult { - ok: true, - run: Some(run), - error_message: None, - }, - Err(message) => BigFishRunProcedureResult { - ok: false, - run: None, - error_message: Some(message), - }, - } -} - -#[spacetimedb::procedure] -pub fn get_big_fish_run( - ctx: &mut ProcedureContext, - input: BigFishRunGetInput, -) -> BigFishRunProcedureResult { - match ctx.try_with_tx(|tx| get_big_fish_run_tx(tx, input.clone())) { - Ok(run) => BigFishRunProcedureResult { - ok: true, - run: Some(run), - error_message: None, - }, - Err(message) => BigFishRunProcedureResult { - ok: false, - run: None, - error_message: Some(message), - }, - } -} - -fn start_big_fish_run_tx( - ctx: &ReducerContext, - input: BigFishRunStartInput, -) -> Result { - validate_run_start_input(&input).map_err(|error| error.to_string())?; - if ctx - .db - .big_fish_runtime_run() - .run_id() - .find(&input.run_id) - .is_some() - { - return Err("big_fish_runtime_run.run_id 已存在".to_string()); - } - let session = ctx - .db - 
.big_fish_creation_session() - .session_id() - .find(&input.session_id) - .ok_or_else(|| "big_fish_creation_session 不存在".to_string())?; - if session.owner_user_id != input.owner_user_id - && session.stage != BigFishCreationStage::Published - { - return Err("big_fish_creation_session 不存在".to_string()); - } - let draft = session - .draft_json - .as_deref() - .ok_or_else(|| "big_fish.draft 尚未编译".to_string()) - .and_then(|value| deserialize_draft(value).map_err(|error| error.to_string()))?; - let snapshot = build_initial_runtime_snapshot( - input.run_id.clone(), - input.session_id.clone(), - &draft, - input.started_at_micros, - ); - let now = Timestamp::from_micros_since_unix_epoch(input.started_at_micros); - ctx.db.big_fish_runtime_run().insert(BigFishRuntimeRun { - run_id: input.run_id, - session_id: input.session_id, - owner_user_id: input.owner_user_id, - status: snapshot.status, - snapshot_json: serialize_runtime_snapshot(&snapshot).map_err(|error| error.to_string())?, - last_input_x: 0.0, - last_input_y: 0.0, - tick: snapshot.tick, - created_at: now, - updated_at: now, - }); - - Ok(snapshot) -} - -fn submit_big_fish_input_tx( - ctx: &ReducerContext, - input: BigFishRunInputSubmitInput, -) -> Result { - validate_run_input_submit_input(&input).map_err(|error| error.to_string())?; - let run = ctx - .db - .big_fish_runtime_run() - .run_id() - .find(&input.run_id) - .filter(|row| row.owner_user_id == input.owner_user_id) - .ok_or_else(|| "big_fish_runtime_run 不存在".to_string())?; - let session = ctx - .db - .big_fish_creation_session() - .session_id() - .find(&run.session_id) - .ok_or_else(|| "big_fish_creation_session 不存在".to_string())?; - if session.owner_user_id != input.owner_user_id - && session.stage != BigFishCreationStage::Published - { - return Err("big_fish_creation_session 不存在".to_string()); - } - let draft = session - .draft_json - .as_deref() - .ok_or_else(|| "big_fish.draft 尚未编译".to_string()) - .and_then(|value| deserialize_draft(value).map_err(|error| 
error.to_string()))?; - let current_snapshot = - deserialize_runtime_snapshot(&run.snapshot_json).map_err(|error| error.to_string())?; - let next_snapshot = advance_runtime_snapshot( - current_snapshot, - &draft.runtime_params, - input.input_x, - input.input_y, - input.submitted_at_micros, - ); - replace_big_fish_run( - ctx, - &run, - BigFishRuntimeRun { - run_id: run.run_id.clone(), - session_id: run.session_id.clone(), - owner_user_id: run.owner_user_id.clone(), - status: next_snapshot.status, - snapshot_json: serialize_runtime_snapshot(&next_snapshot) - .map_err(|error| error.to_string())?, - last_input_x: input.input_x, - last_input_y: input.input_y, - tick: next_snapshot.tick, - created_at: run.created_at, - updated_at: Timestamp::from_micros_since_unix_epoch(input.submitted_at_micros), - }, - ); - - Ok(next_snapshot) -} - -fn get_big_fish_run_tx( - ctx: &ReducerContext, - input: BigFishRunGetInput, -) -> Result { - validate_run_get_input(&input).map_err(|error| error.to_string())?; - let run = ctx - .db - .big_fish_runtime_run() - .run_id() - .find(&input.run_id) - .filter(|row| row.owner_user_id == input.owner_user_id) - .ok_or_else(|| "big_fish_runtime_run 不存在".to_string())?; - - deserialize_runtime_snapshot(&run.snapshot_json).map_err(|error| error.to_string()) -} - -fn replace_big_fish_run( - ctx: &ReducerContext, - current: &BigFishRuntimeRun, - next: BigFishRuntimeRun, -) { - ctx.db - .big_fish_runtime_run() - .run_id() - .delete(¤t.run_id); - ctx.db.big_fish_runtime_run().insert(next); -} diff --git a/server-rs/crates/spacetime-module/src/big_fish/session.rs b/server-rs/crates/spacetime-module/src/big_fish/session.rs index 28d5bdaa..01459d39 100644 --- a/server-rs/crates/spacetime-module/src/big_fish/session.rs +++ b/server-rs/crates/spacetime-module/src/big_fish/session.rs @@ -295,7 +295,7 @@ pub(crate) fn delete_big_fish_work_tx( .filter(|row| row.owner_user_id == input.owner_user_id) .ok_or_else(|| "big_fish_creation_session 不存在".to_string())?; - // 
删除作品时同步清理 Agent 消息、素材槽与运行快照,避免创作页消失后残留孤儿数据。 + // 删除作品时同步清理 Agent 消息与素材槽;最终游玩模拟已经迁到前端,不再写后端运行快照。 ctx.db .big_fish_creation_session() .session_id() @@ -321,18 +321,6 @@ pub(crate) fn delete_big_fish_work_tx( { ctx.db.big_fish_asset_slot().slot_id().delete(&slot.slot_id); } - for run in ctx - .db - .big_fish_runtime_run() - .iter() - .filter(|row| { - row.session_id == input.session_id && row.owner_user_id == input.owner_user_id - }) - .collect::>() - { - ctx.db.big_fish_runtime_run().run_id().delete(&run.run_id); - } - list_big_fish_works_tx( ctx, BigFishWorksListInput { diff --git a/server-rs/crates/spacetime-module/src/big_fish/tables.rs b/server-rs/crates/spacetime-module/src/big_fish/tables.rs index 00b7f169..1e280ef6 100644 --- a/server-rs/crates/spacetime-module/src/big_fish/tables.rs +++ b/server-rs/crates/spacetime-module/src/big_fish/tables.rs @@ -51,22 +51,3 @@ pub struct BigFishAssetSlot { pub(crate) prompt_snapshot: String, pub(crate) updated_at: Timestamp, } - -#[spacetimedb::table( - accessor = big_fish_runtime_run, - index(accessor = by_big_fish_run_owner_user_id, btree(columns = [owner_user_id])), - index(accessor = by_big_fish_run_session_id, btree(columns = [session_id])) -)] -pub struct BigFishRuntimeRun { - #[primary_key] - pub(crate) run_id: String, - pub(crate) session_id: String, - pub(crate) owner_user_id: String, - pub(crate) status: BigFishRunStatus, - pub(crate) snapshot_json: String, - pub(crate) last_input_x: f32, - pub(crate) last_input_y: f32, - pub(crate) tick: u64, - pub(crate) created_at: Timestamp, - pub(crate) updated_at: Timestamp, -} diff --git a/server-rs/crates/spacetime-module/src/migration.rs b/server-rs/crates/spacetime-module/src/migration.rs index 3a128daf..37a52649 100644 --- a/server-rs/crates/spacetime-module/src/migration.rs +++ b/server-rs/crates/spacetime-module/src/migration.rs @@ -140,8 +140,7 @@ macro_rules! 
migration_tables { puzzle_runtime_run, big_fish_creation_session, big_fish_agent_message, - big_fish_asset_slot, - big_fish_runtime_run + big_fish_asset_slot } }; } diff --git a/src/components/big-fish-runtime/BigFishRuntimeShell.test.tsx b/src/components/big-fish-runtime/BigFishRuntimeShell.test.tsx index 51a771db..c4775a94 100644 --- a/src/components/big-fish-runtime/BigFishRuntimeShell.test.tsx +++ b/src/components/big-fish-runtime/BigFishRuntimeShell.test.tsx @@ -1,7 +1,7 @@ // @vitest-environment jsdom -import { fireEvent, render, screen } from '@testing-library/react'; -import { describe, expect, test, vi } from 'vitest'; +import { act, fireEvent, render, screen } from '@testing-library/react'; +import { afterEach, describe, expect, test, vi } from 'vitest'; import type { BigFishRuntimeSnapshotResponse } from '../../../packages/shared/src/contracts/bigFish'; import { BigFishRuntimeShell } from './BigFishRuntimeShell'; @@ -38,7 +38,21 @@ function createRun( }; } +function dispatchPointerEvent( + target: HTMLElement, + type: string, + options: { pointerId: number; clientX: number; clientY: number }, +) { + const event = new Event(type, { bubbles: true, cancelable: true }); + Object.assign(event, options); + target.dispatchEvent(event); +} + describe('BigFishRuntimeShell', () => { + afterEach(() => { + vi.useRealTimers(); + }); + test('renders restart and exit actions after a failed run', () => { const onBack = vi.fn(); const onRestart = vi.fn(); @@ -96,4 +110,51 @@ describe('BigFishRuntimeShell', () => { expect(screen.queryByRole('dialog', { name: '玩法规则' })).toBeNull(); }); + + test('keeps moving in the last sampled direction after drag ends', () => { + vi.useFakeTimers(); + const onSubmitInput = vi.fn(); + + const { container } = render( + {}} + onSubmitInput={onSubmitInput} + />, + ); + const stage = container.querySelector('.touch-none'); + if (!(stage instanceof HTMLElement)) { + throw new Error('Missing big fish stage'); + } + + act(() => { + 
dispatchPointerEvent(stage, 'pointerdown', { + pointerId: 1, + clientX: 100, + clientY: 100, + }); + }); + act(() => { + dispatchPointerEvent(stage, 'pointermove', { + pointerId: 1, + clientX: 140, + clientY: 100, + }); + }); + act(() => { + vi.advanceTimersByTime(100); + }); + act(() => { + dispatchPointerEvent(stage, 'pointerup', { + pointerId: 1, + clientX: 140, + clientY: 100, + }); + }); + act(() => { + vi.advanceTimersByTime(220); + }); + + expect(onSubmitInput).toHaveBeenLastCalledWith({ x: 1, y: 0 }); + }); }); diff --git a/src/components/big-fish-runtime/BigFishRuntimeShell.tsx b/src/components/big-fish-runtime/BigFishRuntimeShell.tsx index f4c6b353..f169ace3 100644 --- a/src/components/big-fish-runtime/BigFishRuntimeShell.tsx +++ b/src/components/big-fish-runtime/BigFishRuntimeShell.tsx @@ -16,6 +16,8 @@ type TouchOrigin = { y: number; }; +type TouchSample = TouchOrigin; + type BigFishRuntimeShellProps = { run: BigFishRuntimeSnapshotResponse | null; assetSlots?: BigFishAssetSlotResponse[]; @@ -42,14 +44,13 @@ function normalizeVector(x: number, y: number) { }; } -function resolveDirectionFromOrigin( - origin: TouchOrigin, - clientX: number, - clientY: number, -) { - const deadZone = 12; - const deltaX = clientX - origin.x; - const deltaY = clientY - origin.y; +function resolveDirectionFromSample(previous: TouchSample, current: TouchSample) { + const deadZone = 4; + const deltaX = current.x - previous.x; + const deltaY = current.y - previous.y; + if (!Number.isFinite(deltaX) || !Number.isFinite(deltaY)) { + return { x: 0, y: 0 }; + } if (Math.hypot(deltaX, deltaY) < deadZone) { return { x: 0, y: 0 }; } @@ -147,7 +148,7 @@ function BigFishRuleModal({ >
- 拖动屏幕控制方向,角色会按固定速度移动。 + 拖动屏幕改变方向,角色会按固定速度移动。
低级或同级野生实体会被收编。
@@ -178,15 +179,15 @@ function BigFishEntityDot({ return (
run.playerLevel - ? 'border-rose-100/80 shadow-rose-950/28' - : 'border-emerald-100/80 shadow-emerald-950/24' + ? 'drop-shadow-[0_8px_12px_rgba(127,29,29,0.36)]' + : 'drop-shadow-[0_8px_12px_rgba(6,78,59,0.3)]' : owned ? isLeader ? 'border-cyan-100 bg-cyan-300 shadow-cyan-950/30' @@ -202,11 +203,10 @@ function BigFishEntityDot({ -
) : null} @@ -227,6 +227,8 @@ export function BigFishRuntimeShell({ }: BigFishRuntimeShellProps) { const stageRef = useRef(null); const [touchOrigin, setTouchOrigin] = useState(null); + const currentTouchRef = useRef(null); + const lastTouchSampleRef = useRef(null); const [isRuleModalOpen, setIsRuleModalOpen] = useState(false); const [stick, setStick] = useState({ x: 0, y: 0 }); const stickRef = useRef(stick); @@ -251,6 +253,31 @@ export function BigFishRuntimeShell({ }; }, [onSubmitInput, run?.status]); + useEffect(() => { + if (run?.status !== 'running' || !touchOrigin) { + return undefined; + } + + const timer = window.setInterval(() => { + const current = currentTouchRef.current; + const previous = lastTouchSampleRef.current; + if (!current || !previous || current.pointerId !== previous.pointerId) { + return; + } + + const sampledDirection = resolveDirectionFromSample(previous, current); + lastTouchSampleRef.current = { ...current }; + if (sampledDirection.x === 0 && sampledDirection.y === 0) { + return; + } + submitDirection(sampledDirection); + }, 100); + + return () => { + window.clearInterval(timer); + }; + }, [run?.status, touchOrigin]); + const submitDirection = (direction: SubmitBigFishInputRequest) => { setStick(direction); onSubmitInput(direction); @@ -260,22 +287,35 @@ export function BigFishRuntimeShell({ if (event.target instanceof HTMLElement && event.target.closest('button')) { return; } - event.currentTarget.setPointerCapture(event.pointerId); + if (!Number.isFinite(event.clientX) || !Number.isFinite(event.clientY)) { + return; + } + event.currentTarget.setPointerCapture?.(event.pointerId); setTouchOrigin({ pointerId: event.pointerId, x: event.clientX, y: event.clientY, }); - submitDirection({ x: 0, y: 0 }); + currentTouchRef.current = { + pointerId: event.pointerId, + x: event.clientX, + y: event.clientY, + }; + lastTouchSampleRef.current = { ...currentTouchRef.current }; }; const updateTouchControl = (event: PointerEvent) => { if (!touchOrigin 
|| touchOrigin.pointerId !== event.pointerId) { return; } - submitDirection( - resolveDirectionFromOrigin(touchOrigin, event.clientX, event.clientY), - ); + if (!Number.isFinite(event.clientX) || !Number.isFinite(event.clientY)) { + return; + } + currentTouchRef.current = { + pointerId: event.pointerId, + x: event.clientX, + y: event.clientY, + }; }; const endTouchControl = (event: PointerEvent) => { @@ -283,7 +323,8 @@ export function BigFishRuntimeShell({ return; } setTouchOrigin(null); - submitDirection({ x: 0, y: 0 }); + currentTouchRef.current = null; + lastTouchSampleRef.current = null; }; if (!run) { diff --git a/src/components/platform-entry/PlatformEntryFlowShellImpl.tsx b/src/components/platform-entry/PlatformEntryFlowShellImpl.tsx index 925c7d16..e6cd9574 100644 --- a/src/components/platform-entry/PlatformEntryFlowShellImpl.tsx +++ b/src/components/platform-entry/PlatformEntryFlowShellImpl.tsx @@ -46,8 +46,8 @@ import { streamBigFishCreationMessage, } from '../../services/big-fish-creation'; import { - startBigFishRuntimeRun, - submitBigFishRuntimeInput, + advanceLocalBigFishRuntimeRun, + startLocalBigFishRuntimeRun, } from '../../services/big-fish-runtime'; import { listBigFishGallery } from '../../services/big-fish-gallery'; import { @@ -415,7 +415,6 @@ export function PlatformEntryFlowShellImpl({ const [isBigFishLoadingLibrary, setIsBigFishLoadingLibrary] = useState(false); const [bigFishGenerationState, setBigFishGenerationState] = useState(null); - const bigFishInputInFlightRef = useRef(false); const [puzzleOperation, setPuzzleOperation] = useState(null); const [puzzleWorks, setPuzzleWorks] = useState([]); @@ -1105,59 +1104,25 @@ export function PlatformEntryFlowShellImpl({ } }, [bigFishRun, bigFishSession, selectionStage, setSelectionStage]); - const startBigFishRun = useCallback(async () => { - if (!bigFishSession || isBigFishBusy) { + const startBigFishRun = useCallback(() => { + if (!bigFishSession) { return; } - setIsBigFishBusy(true); 
setBigFishError(null); + setBigFishRun(startLocalBigFishRuntimeRun({ session: bigFishSession })); + setSelectionStage('big-fish-runtime'); + }, [bigFishSession, setSelectionStage]); - try { - const { run } = await startBigFishRuntimeRun(bigFishSession.sessionId); - setBigFishRun(run); - setSelectionStage('big-fish-runtime'); - } catch (error) { - setBigFishError( - resolveBigFishErrorMessage(error, '启动大鱼吃小鱼测试玩法失败。'), - ); - } finally { - setIsBigFishBusy(false); - } - }, [ - bigFishSession, - isBigFishBusy, - resolveBigFishErrorMessage, - setSelectionStage, - ]); - - const restartBigFishRun = useCallback(async () => { - const sessionId = bigFishSession?.sessionId ?? bigFishRun?.sessionId; - if (!sessionId || isBigFishBusy) { + const restartBigFishRun = useCallback(() => { + if (!bigFishSession && !bigFishRun) { return; } - setIsBigFishBusy(true); setBigFishError(null); - - try { - const { run } = await startBigFishRuntimeRun(sessionId); - setBigFishRun(run); - setSelectionStage('big-fish-runtime'); - } catch (error) { - setBigFishError( - resolveBigFishErrorMessage(error, '重新开始大鱼吃小鱼玩法失败。'), - ); - } finally { - setIsBigFishBusy(false); - } - }, [ - bigFishRun?.sessionId, - bigFishSession?.sessionId, - isBigFishBusy, - resolveBigFishErrorMessage, - setSelectionStage, - ]); + setBigFishRun(startLocalBigFishRuntimeRun({ session: bigFishSession })); + setSelectionStage('big-fish-runtime'); + }, [bigFishRun, bigFishSession, setSelectionStage]); const startPuzzleRunFromProfile = useCallback( async (profileId: string) => { @@ -1236,29 +1201,15 @@ export function PlatformEntryFlowShellImpl({ const submitBigFishInput = useCallback( (payload: SubmitBigFishInputRequest) => { - if ( - !bigFishRun || - bigFishRun.status !== 'running' || - bigFishInputInFlightRef.current - ) { + if (!bigFishRun || bigFishRun.status !== 'running') { return; } - bigFishInputInFlightRef.current = true; - void submitBigFishRuntimeInput(bigFishRun.runId, payload) - .then(({ run }) => { - 
setBigFishRun(run); - }) - .catch((error) => { - setBigFishError( - resolveBigFishErrorMessage(error, '同步大鱼吃小鱼输入失败。'), - ); - }) - .finally(() => { - bigFishInputInFlightRef.current = false; - }); + setBigFishRun((currentRun) => + currentRun ? advanceLocalBigFishRuntimeRun(currentRun, payload) : currentRun, + ); }, - [bigFishRun, resolveBigFishErrorMessage], + [bigFishRun], ); const swapPuzzlePiecesInRun = useCallback( @@ -1636,30 +1587,19 @@ export function PlatformEntryFlowShellImpl({ ); const startBigFishRunFromWork = useCallback( - async (item: BigFishWorkSummary) => { + (item: BigFishWorkSummary) => { const sessionId = item.sourceSessionId?.trim(); if (!sessionId) { setBigFishError('当前作品缺少会话信息,暂时无法进入玩法。'); return; } - setIsBigFishBusy(true); setBigFishError(null); - - try { - const { run } = await startBigFishRuntimeRun(sessionId); - bigFishFlow.setSession(null); - setBigFishRun(run); - setSelectionStage('big-fish-runtime'); - } catch (error) { - setBigFishError( - resolveBigFishErrorMessage(error, '启动大鱼吃小鱼玩法失败。'), - ); - } finally { - setIsBigFishBusy(false); - } + bigFishFlow.setSession(null); + setBigFishRun(startLocalBigFishRuntimeRun({ work: item })); + setSelectionStage('big-fish-runtime'); }, - [bigFishFlow, resolveBigFishErrorMessage, setSelectionStage], + [bigFishFlow, setSelectionStage], ); const handlePublicCodeSearch = useCallback( diff --git a/src/components/rpg-entry/RpgEntryFlowShell.agent.interaction.test.tsx b/src/components/rpg-entry/RpgEntryFlowShell.agent.interaction.test.tsx index a0c89cbf..e2ed05a1 100644 --- a/src/components/rpg-entry/RpgEntryFlowShell.agent.interaction.test.tsx +++ b/src/components/rpg-entry/RpgEntryFlowShell.agent.interaction.test.tsx @@ -17,7 +17,7 @@ import { getBigFishCreationSession, } from '../../services/big-fish-creation'; import { listBigFishGallery } from '../../services/big-fish-gallery'; -import { startBigFishRuntimeRun } from '../../services/big-fish-runtime'; +import { startLocalBigFishRuntimeRun } from 
'../../services/big-fish-runtime'; import { listBigFishWorks } from '../../services/big-fish-works'; import { createPuzzleAgentSession, @@ -153,8 +153,8 @@ vi.mock('../../services/big-fish-gallery', () => ({ })); vi.mock('../../services/big-fish-runtime', () => ({ - startBigFishRuntimeRun: vi.fn(), - submitBigFishRuntimeInput: vi.fn(), + advanceLocalBigFishRuntimeRun: vi.fn((run) => run), + startLocalBigFishRuntimeRun: vi.fn(), })); vi.mock('../../services/puzzle-agent', () => ({ @@ -1033,30 +1033,28 @@ beforeEach(() => { vi.mocked(listBigFishGallery).mockResolvedValue({ items: [], }); - vi.mocked(startBigFishRuntimeRun).mockResolvedValue({ - run: { - runId: 'big-fish-run-1', - sessionId: 'big-fish-session-public-1', - status: 'running', - tick: 0, - playerLevel: 1, - winLevel: 8, - leaderEntityId: 'owned-1', - ownedEntities: [ - { - entityId: 'owned-1', - level: 1, - position: { x: 0, y: 0 }, - radius: 12, - offscreenSeconds: 0, - }, - ], - wildEntities: [], - cameraCenter: { x: 0, y: 0 }, - lastInput: { x: 0, y: 0 }, - eventLog: ['机械鱼群开始巡游。'], - updatedAt: '2026-04-25T12:12:00.000Z', - }, + vi.mocked(startLocalBigFishRuntimeRun).mockReturnValue({ + runId: 'big-fish-run-1', + sessionId: 'big-fish-session-public-1', + status: 'running', + tick: 0, + playerLevel: 1, + winLevel: 8, + leaderEntityId: 'owned-1', + ownedEntities: [ + { + entityId: 'owned-1', + level: 1, + position: { x: 0, y: 0 }, + radius: 12, + offscreenSeconds: 0, + }, + ], + wildEntities: [], + cameraCenter: { x: 0, y: 0 }, + lastInput: { x: 0, y: 0 }, + eventLog: ['机械鱼群开始巡游。'], + updatedAt: '2026-04-25T12:12:00.000Z', }); vi.mocked(listPuzzleWorks).mockResolvedValue({ items: [], @@ -1990,9 +1988,11 @@ test('public code search opens a published big fish work by BF code', async () = await user.click(screen.getByRole('button', { name: '搜索' })); await waitFor(() => { - expect(startBigFishRuntimeRun).toHaveBeenCalledWith( - 'big-fish-session-public-1', - ); + 
expect(startLocalBigFishRuntimeRun).toHaveBeenCalledWith({ + work: expect.objectContaining({ + sourceSessionId: 'big-fish-session-public-1', + }), + }); }); expect(await screen.findByText('Lv.1/8 · 进行中')).toBeTruthy(); expect(getBigFishCreationSession).not.toHaveBeenCalledWith( diff --git a/src/services/big-fish-runtime/bigFishLocalRuntime.ts b/src/services/big-fish-runtime/bigFishLocalRuntime.ts new file mode 100644 index 00000000..8ba019b8 --- /dev/null +++ b/src/services/big-fish-runtime/bigFishLocalRuntime.ts @@ -0,0 +1,395 @@ +import type { + BigFishGameDraftResponse, + BigFishRuntimeEntityResponse, + BigFishRuntimeSnapshotResponse, + BigFishSessionSnapshotResponse, + SubmitBigFishInputRequest, +} from '../../../packages/shared/src/contracts/bigFish'; +import type { BigFishWorkSummary } from '../../../packages/shared/src/contracts/bigFishWorkSummary'; + +const VIEW_WIDTH = 720; +const VIEW_HEIGHT = 1280; +const WORLD_HALF_WIDTH = 1400; +const WORLD_HALF_HEIGHT = 2400; +const DEFAULT_LEVEL_COUNT = 8; +const DEFAULT_WILD_COUNT = 28; +const LEADER_SPEED = 210; +const FOLLOWER_SPEED = 170; +const WILD_SPEED = 74; +const MERGE_COUNT = 3; + +function clamp(value: number, min: number, max: number) { + return Math.max(min, Math.min(max, value)); +} + +function entityRadius(level: number) { + return 18 + level * 4; +} + +function normalizeVector(x: number, y: number) { + const length = Math.hypot(x, y); + if (length <= 0.001) { + return { x: 0, y: 0 }; + } + return { x: x / length, y: y / length }; +} + +function distance( + first: BigFishRuntimeEntityResponse, + second: BigFishRuntimeEntityResponse, +) { + return Math.hypot( + first.position.x - second.position.x, + first.position.y - second.position.y, + ); +} + +function buildEntity( + entityId: string, + level: number, + x: number, + y: number, +): BigFishRuntimeEntityResponse { + return { + entityId, + level, + position: { x, y }, + radius: entityRadius(level), + offscreenSeconds: 0, + }; +} + +function 
resolveWinLevel( + draft?: BigFishGameDraftResponse | null, + work?: BigFishWorkSummary | null, +) { + return draft?.runtimeParams.winLevel ?? work?.levelCount ?? DEFAULT_LEVEL_COUNT; +} + +function resolveWildTargetCount(draft?: BigFishGameDraftResponse | null) { + return Math.max(DEFAULT_WILD_COUNT, draft?.runtimeParams.spawnTargetCount ?? 0); +} + +function spawnLevel(playerLevel: number, winLevel: number, index: number) { + if (playerLevel <= 1 && index % 4 < 2) { + return 1; + } + const deltas = [-2, -1, 1, 2]; + const delta = deltas[index % deltas.length] ?? 1; + return clamp(playerLevel + delta, 1, winLevel); +} + +function spawnPosition(center: { x: number; y: number }, index: number) { + const side = index % 4; + const offset = ((index * 97) % 980) - 490; + if (side === 0) { + return { x: center.x - VIEW_WIDTH * 0.72, y: center.y + offset }; + } + if (side === 1) { + return { x: center.x + VIEW_WIDTH * 0.72, y: center.y + offset }; + } + if (side === 2) { + return { x: center.x + offset, y: center.y - VIEW_HEIGHT * 0.64 }; + } + return { x: center.x + offset, y: center.y + VIEW_HEIGHT * 0.64 }; +} + +function buildWildEntity( + tick: number, + index: number, + playerLevel: number, + winLevel: number, + center: { x: number; y: number }, +) { + const level = spawnLevel(playerLevel, winLevel, index); + const position = spawnPosition(center, index); + return buildEntity(`wild-${tick}-${index}`, level, position.x, position.y); +} + +export function startLocalBigFishRuntimeRun({ + session, + work, +}: { + session?: BigFishSessionSnapshotResponse | null; + work?: BigFishWorkSummary | null; +}): BigFishRuntimeSnapshotResponse { + const winLevel = resolveWinLevel(session?.draft, work); + const wildCount = resolveWildTargetCount(session?.draft); + const leader = buildEntity('owned-1', 1, 0, 0); + const wildEntities = [ + buildEntity('wild-open-1', 1, 92, 0), + buildEntity('wild-open-2', 1, -118, 46), + ]; + while (wildEntities.length < wildCount) { + 
wildEntities.push( + buildWildEntity(0, wildEntities.length, 1, winLevel, leader.position), + ); + } + + return { + runId: `local-big-fish-run-${Date.now()}`, + sessionId: session?.sessionId ?? work?.sourceSessionId ?? 'local-big-fish-session', + status: 'running', + tick: 0, + playerLevel: 1, + winLevel, + leaderEntityId: leader.entityId, + ownedEntities: [leader], + wildEntities, + cameraCenter: { ...leader.position }, + lastInput: { x: 0, y: 0 }, + eventLog: ['开局生成同级可收编目标'], + updatedAt: new Date().toISOString(), + }; +} + +function moveLeader( + leader: BigFishRuntimeEntityResponse, + input: SubmitBigFishInputRequest, +) { + return { + ...leader, + position: { + x: clamp( + leader.position.x + input.x * LEADER_SPEED * 0.1, + -WORLD_HALF_WIDTH, + WORLD_HALF_WIDTH, + ), + y: clamp( + leader.position.y + input.y * LEADER_SPEED * 0.1, + -WORLD_HALF_HEIGHT, + WORLD_HALF_HEIGHT, + ), + }, + }; +} + +function moveFollower( + follower: BigFishRuntimeEntityResponse, + leader: BigFishRuntimeEntityResponse, + index: number, +) { + const slotY = Math.sin(index * 0.7) * 42; + const target = { + x: leader.position.x - 52 - index * 10, + y: leader.position.y + slotY, + }; + const delta = { + x: target.x - follower.position.x, + y: target.y - follower.position.y, + }; + const direction = normalizeVector(delta.x, delta.y); + const step = Math.min(FOLLOWER_SPEED * 0.1, Math.hypot(delta.x, delta.y)); + return { + ...follower, + position: { + x: follower.position.x + direction.x * step, + y: follower.position.y + direction.y * step, + }, + }; +} + +function moveWildEntity(entity: BigFishRuntimeEntityResponse, tick: number) { + const phase = tick * 0.23 + entity.level * 0.91 + entity.entityId.length * 0.13; + return { + ...entity, + position: { + x: clamp( + entity.position.x + Math.cos(phase) * (WILD_SPEED + entity.level * 3) * 0.1, + -WORLD_HALF_WIDTH, + WORLD_HALF_WIDTH, + ), + y: clamp( + entity.position.y + Math.sin(phase * 0.72) * (WILD_SPEED + entity.level * 3) * 0.1, + 
-WORLD_HALF_HEIGHT, + WORLD_HALF_HEIGHT, + ), + }, + }; +} + +function mergeOwnedEntities( + ownedEntities: BigFishRuntimeEntityResponse[], + tick: number, +) { + let nextOwned = [...ownedEntities]; + const events: string[] = []; + let changed = true; + + while (changed) { + changed = false; + for (let level = 1; level < 32; level += 1) { + const sameLevel = nextOwned + .map((entity, index) => ({ entity, index })) + .filter(({ entity }) => entity.level === level) + .slice(0, MERGE_COUNT); + if (sameLevel.length < MERGE_COUNT) { + continue; + } + + const center = sameLevel.reduce( + (acc, { entity }) => ({ + x: acc.x + entity.position.x / MERGE_COUNT, + y: acc.y + entity.position.y / MERGE_COUNT, + }), + { x: 0, y: 0 }, + ); + const removeSet = new Set(sameLevel.map(({ index }) => index)); + nextOwned = nextOwned.filter((_, index) => !removeSet.has(index)); + nextOwned.push( + buildEntity(`owned-merge-${level + 1}-${tick}`, level + 1, center.x, center.y), + ); + events.push(`3 个 ${level} 级实体合成 ${level + 1} 级`); + changed = true; + break; + } + } + + return { ownedEntities: nextOwned, events }; +} + +function isOffscreen( + entity: BigFishRuntimeEntityResponse, + cameraCenter: { x: number; y: number }, +) { + return ( + entity.position.x + entity.radius < cameraCenter.x - VIEW_WIDTH / 2 || + entity.position.x - entity.radius > cameraCenter.x + VIEW_WIDTH / 2 || + entity.position.y + entity.radius < cameraCenter.y - VIEW_HEIGHT / 2 || + entity.position.y - entity.radius > cameraCenter.y + VIEW_HEIGHT / 2 + ); +} + +function refreshLeader(ownedEntities: BigFishRuntimeEntityResponse[]) { + return [...ownedEntities].sort((left, right) => { + if (right.level !== left.level) { + return right.level - left.level; + } + return left.entityId.localeCompare(right.entityId); + }); +} + +export function advanceLocalBigFishRuntimeRun( + run: BigFishRuntimeSnapshotResponse, + input: SubmitBigFishInputRequest, +): BigFishRuntimeSnapshotResponse { + if (run.status !== 'running') { + 
return run; + } + + const nextTick = run.tick + 1; + const normalizedInput = normalizeVector(input.x, input.y); + const sortedOwned = refreshLeader(run.ownedEntities); + const currentLeader = sortedOwned[0]; + if (!currentLeader) { + return { ...run, status: 'failed', eventLog: ['己方实体归零,本局失败'] }; + } + + const nextLeader = moveLeader(currentLeader, normalizedInput); + let ownedEntities = [ + nextLeader, + ...sortedOwned.slice(1).map((entity, index) => + moveFollower(entity, nextLeader, index + 1), + ), + ]; + let wildEntities = run.wildEntities.map((entity) => + moveWildEntity(entity, nextTick), + ); + const events = [...run.eventLog]; + const removedWild = new Set(); + const removedOwned = new Set(); + const newlyOwned: BigFishRuntimeEntityResponse[] = []; + + for (const owned of ownedEntities) { + if (removedOwned.has(owned.entityId)) { + continue; + } + for (const wild of wildEntities) { + if (removedWild.has(wild.entityId)) { + continue; + } + if (distance(owned, wild) > owned.radius + wild.radius) { + continue; + } + if (owned.level >= wild.level) { + removedWild.add(wild.entityId); + newlyOwned.push( + buildEntity( + `owned-from-${wild.entityId}-${nextTick}`, + wild.level, + wild.position.x, + wild.position.y, + ), + ); + events.push(`收编 ${wild.level} 级实体`); + } else { + removedOwned.add(owned.entityId); + events.push(`${owned.level} 级己方实体被 ${wild.level} 级野生实体吃掉`); + } + } + } + + ownedEntities = ownedEntities + .filter((entity) => !removedOwned.has(entity.entityId)) + .concat(newlyOwned); + wildEntities = wildEntities.filter((entity) => !removedWild.has(entity.entityId)); + + const mergeResult = mergeOwnedEntities(ownedEntities, nextTick); + ownedEntities = refreshLeader(mergeResult.ownedEntities); + events.push(...mergeResult.events); + + const playerLevel = Math.max(...ownedEntities.map((entity) => entity.level), 0); + const leader = ownedEntities[0] ?? null; + const cameraCenter = leader ? 
{ ...leader.position } : run.cameraCenter; + wildEntities = wildEntities + .map((entity) => { + const shouldCull = + entity.level === playerLevel || + entity.level >= playerLevel + 3 || + entity.level + 3 <= playerLevel; + const offscreenSeconds = + shouldCull && isOffscreen(entity, cameraCenter) + ? entity.offscreenSeconds + 0.1 + : 0; + return { ...entity, offscreenSeconds }; + }) + .filter((entity) => entity.offscreenSeconds < 3); + + while (wildEntities.length < DEFAULT_WILD_COUNT) { + wildEntities.push( + buildWildEntity( + nextTick, + wildEntities.length + nextTick, + Math.max(playerLevel, 1), + run.winLevel, + cameraCenter, + ), + ); + } + + const status = + ownedEntities.length === 0 + ? 'failed' + : playerLevel >= run.winLevel + ? 'won' + : 'running'; + if (status === 'failed') { + events.push('己方实体归零,本局失败'); + } else if (status === 'won') { + events.push('获得最高等级实体,通关'); + } + + return { + ...run, + status, + tick: nextTick, + playerLevel, + leaderEntityId: leader?.entityId ?? null, + ownedEntities, + wildEntities, + cameraCenter, + lastInput: normalizedInput, + eventLog: events.slice(-5), + updatedAt: new Date().toISOString(), + }; +} diff --git a/src/services/big-fish-runtime/bigFishRuntimeClient.ts b/src/services/big-fish-runtime/bigFishRuntimeClient.ts deleted file mode 100644 index cbacd889..00000000 --- a/src/services/big-fish-runtime/bigFishRuntimeClient.ts +++ /dev/null @@ -1,68 +0,0 @@ -import type { - BigFishRunResponse, - SubmitBigFishInputRequest, -} from '../../../packages/shared/src/contracts/bigFish'; -import { type ApiRetryOptions, requestJson } from '../apiClient'; - -const BIG_FISH_RUNTIME_API_BASE = '/api/runtime/big-fish'; -const BIG_FISH_RUNTIME_READ_RETRY: ApiRetryOptions = { - maxRetries: 1, - baseDelayMs: 120, - maxDelayMs: 360, -}; -const BIG_FISH_RUNTIME_WRITE_RETRY: ApiRetryOptions = { - maxRetries: 1, - baseDelayMs: 120, - maxDelayMs: 360, - retryUnsafeMethods: true, -}; - -export async function startBigFishRuntimeRun(sessionId: 
string) { - return requestJson( - `${BIG_FISH_RUNTIME_API_BASE}/sessions/${encodeURIComponent(sessionId)}/runs`, - { - method: 'POST', - }, - '启动大鱼吃小鱼测试玩法失败', - { - retry: BIG_FISH_RUNTIME_WRITE_RETRY, - }, - ); -} - -export async function getBigFishRuntimeRun(runId: string) { - return requestJson( - `${BIG_FISH_RUNTIME_API_BASE}/runs/${encodeURIComponent(runId)}`, - { - method: 'GET', - }, - '读取大鱼吃小鱼运行快照失败', - { - retry: BIG_FISH_RUNTIME_READ_RETRY, - }, - ); -} - -export async function submitBigFishRuntimeInput( - runId: string, - payload: SubmitBigFishInputRequest, -) { - return requestJson( - `${BIG_FISH_RUNTIME_API_BASE}/runs/${encodeURIComponent(runId)}/input`, - { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(payload), - }, - '提交大鱼吃小鱼移动输入失败', - { - retry: BIG_FISH_RUNTIME_WRITE_RETRY, - }, - ); -} - -export const bigFishRuntimeClient = { - startRun: startBigFishRuntimeRun, - getRun: getBigFishRuntimeRun, - submitInput: submitBigFishRuntimeInput, -}; diff --git a/src/services/big-fish-runtime/index.ts b/src/services/big-fish-runtime/index.ts index 8da02d2d..bf43b28c 100644 --- a/src/services/big-fish-runtime/index.ts +++ b/src/services/big-fish-runtime/index.ts @@ -1,6 +1,4 @@ export { - bigFishRuntimeClient, - getBigFishRuntimeRun, - startBigFishRuntimeRun, - submitBigFishRuntimeInput, -} from './bigFishRuntimeClient'; + advanceLocalBigFishRuntimeRun, + startLocalBigFishRuntimeRun, +} from './bigFishLocalRuntime';