Merge branch 'master' of http://82.157.175.59:3000/GenarrativeAI/Genarrative
Some checks failed
CI / verify (push) Has been cancelled

This commit is contained in:
2026-04-27 22:50:20 +08:00
60 changed files with 2980 additions and 1908 deletions

1
server-rs/Cargo.lock generated
View File

@@ -2698,6 +2698,7 @@ dependencies = [
"serde_json",
"shared-kernel",
"spacetimedb",
"spacetimedb-lib",
]
[[package]]

View File

@@ -65,7 +65,6 @@ const DATABASE_OVERVIEW_TABLES: &[&str] = &[
"big_fish_creation_session",
"big_fish_agent_message",
"big_fish_asset_slot",
"big_fish_runtime_run",
"puzzle_work_profile",
"puzzle_agent_session",
"puzzle_agent_message",

View File

@@ -33,9 +33,9 @@ use crate::{
auth_public_user::{get_public_user_by_code, get_public_user_by_id},
auth_sessions::auth_sessions,
big_fish::{
create_big_fish_session, delete_big_fish_work, execute_big_fish_action, get_big_fish_run,
get_big_fish_session, get_big_fish_works, list_big_fish_gallery, start_big_fish_run,
stream_big_fish_message, submit_big_fish_input, submit_big_fish_message,
create_big_fish_session, delete_big_fish_work, execute_big_fish_action,
get_big_fish_session, get_big_fish_works, list_big_fish_gallery, stream_big_fish_message,
submit_big_fish_message,
},
character_animation_assets::{
generate_character_animation, get_character_animation_job, get_character_workflow_cache,
@@ -575,27 +575,6 @@ pub fn build_router(state: AppState) -> Router {
require_bearer_auth,
)),
)
.route(
"/api/runtime/big-fish/sessions/{session_id}/runs",
post(start_big_fish_run).route_layer(middleware::from_fn_with_state(
state.clone(),
require_bearer_auth,
)),
)
.route(
"/api/runtime/big-fish/runs/{run_id}",
get(get_big_fish_run).route_layer(middleware::from_fn_with_state(
state.clone(),
require_bearer_auth,
)),
)
.route(
"/api/runtime/big-fish/runs/{run_id}/input",
post(submit_big_fish_input).route_layer(middleware::from_fn_with_state(
state.clone(),
require_bearer_auth,
)),
)
.route(
"/api/runtime/puzzle/agent/sessions",
post(create_puzzle_agent_session).route_layer(middleware::from_fn_with_state(
@@ -1445,6 +1424,34 @@ mod tests {
);
}
#[tokio::test]
async fn auth_login_options_keeps_password_entry_when_external_methods_disabled() {
let app = build_router(AppState::new(AppConfig::default()).expect("state should build"));
let response = app
.oneshot(
Request::builder()
.uri("/api/auth/login-options")
.body(Body::empty())
.expect("request should build"),
)
.await
.expect("request should succeed");
assert_eq!(response.status(), StatusCode::OK);
let body = response
.into_body()
.collect()
.await
.expect("body should collect")
.to_bytes();
let payload: Value = serde_json::from_slice(&body).expect("body should be valid json");
assert_eq!(
payload["availableLoginMethods"],
serde_json::json!(["password"])
);
}
#[tokio::test]
async fn send_phone_code_returns_mock_cooldown_and_expire_seconds() {
let config = AppConfig {

View File

@@ -23,10 +23,8 @@ use shared_contracts::big_fish::{
BigFishActionResponse, BigFishAgentMessageResponse, BigFishAnchorItemResponse,
BigFishAnchorPackResponse, BigFishAssetCoverageResponse, BigFishAssetSlotResponse,
BigFishBackgroundBlueprintResponse, BigFishGameDraftResponse, BigFishLevelBlueprintResponse,
BigFishRunResponse, BigFishRuntimeEntityResponse, BigFishRuntimeParamsResponse,
BigFishRuntimeSnapshotResponse, BigFishSessionResponse, BigFishSessionSnapshotResponse,
BigFishVector2Response, CreateBigFishSessionRequest, ExecuteBigFishActionRequest,
SendBigFishMessageRequest, SubmitBigFishInputRequest,
BigFishRuntimeParamsResponse, BigFishSessionResponse, BigFishSessionSnapshotResponse,
CreateBigFishSessionRequest, ExecuteBigFishActionRequest, SendBigFishMessageRequest,
};
use shared_contracts::big_fish_works::{BigFishWorkSummaryResponse, BigFishWorksResponse};
use shared_kernel::{build_prefixed_uuid_id, format_timestamp_micros};
@@ -34,10 +32,8 @@ use spacetime_client::{
BigFishAgentMessageRecord, BigFishAnchorItemRecord, BigFishAnchorPackRecord,
BigFishAssetCoverageRecord, BigFishAssetGenerateRecordInput, BigFishAssetSlotRecord,
BigFishBackgroundBlueprintRecord, BigFishGameDraftRecord, BigFishLevelBlueprintRecord,
BigFishMessageSubmitRecordInput, BigFishRunInputSubmitRecordInput, BigFishRunStartRecordInput,
BigFishRuntimeEntityRecord, BigFishRuntimeParamsRecord, BigFishRuntimeRecord,
BigFishSessionCreateRecordInput, BigFishSessionRecord, BigFishVector2Record,
BigFishWorkSummaryRecord, SpacetimeClientError,
BigFishMessageSubmitRecordInput, BigFishRuntimeParamsRecord, BigFishSessionCreateRecordInput,
BigFishSessionRecord, BigFishWorkSummaryRecord, SpacetimeClientError,
};
use tokio::time::sleep;
@@ -577,99 +573,6 @@ pub async fn execute_big_fish_action(
))
}
pub async fn start_big_fish_run(
State(state): State<AppState>,
Path(session_id): Path<String>,
Extension(request_context): Extension<RequestContext>,
Extension(authenticated): Extension<AuthenticatedAccessToken>,
) -> Result<Json<Value>, Response> {
ensure_non_empty(&request_context, &session_id, "sessionId")?;
let run = state
.spacetime_client()
.start_big_fish_run(BigFishRunStartRecordInput {
run_id: build_prefixed_uuid_id("big-fish-run-"),
session_id,
owner_user_id: authenticated.claims().user_id().to_string(),
started_at_micros: current_utc_micros(),
})
.await
.map_err(|error| {
big_fish_error_response(&request_context, map_big_fish_client_error(error))
})?;
Ok(json_success_body(
Some(&request_context),
BigFishRunResponse {
run: map_big_fish_runtime_response(run),
},
))
}
pub async fn get_big_fish_run(
State(state): State<AppState>,
Path(run_id): Path<String>,
Extension(request_context): Extension<RequestContext>,
Extension(authenticated): Extension<AuthenticatedAccessToken>,
) -> Result<Json<Value>, Response> {
ensure_non_empty(&request_context, &run_id, "runId")?;
let run = state
.spacetime_client()
.get_big_fish_run(run_id, authenticated.claims().user_id().to_string())
.await
.map_err(|error| {
big_fish_error_response(&request_context, map_big_fish_client_error(error))
})?;
Ok(json_success_body(
Some(&request_context),
BigFishRunResponse {
run: map_big_fish_runtime_response(run),
},
))
}
pub async fn submit_big_fish_input(
State(state): State<AppState>,
Path(run_id): Path<String>,
Extension(request_context): Extension<RequestContext>,
Extension(authenticated): Extension<AuthenticatedAccessToken>,
payload: Result<Json<SubmitBigFishInputRequest>, JsonRejection>,
) -> Result<Json<Value>, Response> {
let Json(payload) = payload.map_err(|error| {
big_fish_error_response(
&request_context,
AppError::from_status(StatusCode::BAD_REQUEST).with_details(json!({
"provider": "big-fish",
"message": error.body_text(),
})),
)
})?;
ensure_non_empty(&request_context, &run_id, "runId")?;
let run = state
.spacetime_client()
.submit_big_fish_input(BigFishRunInputSubmitRecordInput {
run_id,
owner_user_id: authenticated.claims().user_id().to_string(),
input_x: payload.x,
input_y: payload.y,
submitted_at_micros: current_utc_micros(),
})
.await
.map_err(|error| {
big_fish_error_response(&request_context, map_big_fish_client_error(error))
})?;
Ok(json_success_body(
Some(&request_context),
BigFishRunResponse {
run: map_big_fish_runtime_response(run),
},
))
}
fn map_big_fish_session_response(session: BigFishSessionRecord) -> BigFishSessionSnapshotResponse {
BigFishSessionSnapshotResponse {
session_id: session.session_id,
@@ -910,32 +813,6 @@ fn map_big_fish_agent_message_response(
}
}
fn map_big_fish_runtime_response(run: BigFishRuntimeRecord) -> BigFishRuntimeSnapshotResponse {
BigFishRuntimeSnapshotResponse {
run_id: run.run_id,
session_id: run.session_id,
status: run.status,
tick: run.tick,
player_level: run.player_level,
win_level: run.win_level,
leader_entity_id: run.leader_entity_id,
owned_entities: run
.owned_entities
.into_iter()
.map(map_big_fish_entity_response)
.collect(),
wild_entities: run
.wild_entities
.into_iter()
.map(map_big_fish_entity_response)
.collect(),
camera_center: map_big_fish_vector_response(run.camera_center),
last_input: map_big_fish_vector_response(run.last_input),
event_log: run.event_log,
updated_at: run.updated_at,
}
}
fn map_big_fish_work_summary_response(
item: BigFishWorkSummaryRecord,
) -> BigFishWorkSummaryResponse {
@@ -957,25 +834,6 @@ fn map_big_fish_work_summary_response(
}
}
fn map_big_fish_entity_response(
entity: BigFishRuntimeEntityRecord,
) -> BigFishRuntimeEntityResponse {
BigFishRuntimeEntityResponse {
entity_id: entity.entity_id,
level: entity.level,
position: map_big_fish_vector_response(entity.position),
radius: entity.radius,
offscreen_seconds: entity.offscreen_seconds,
}
}
fn map_big_fish_vector_response(vector: BigFishVector2Record) -> BigFishVector2Response {
BigFishVector2Response {
x: vector.x,
y: vector.y,
}
}
fn build_big_fish_welcome_text(seed_text: &str) -> String {
if seed_text.trim().is_empty() {
return "我会先帮你确定大鱼吃小鱼的核心锚点。可以从主题生态、成长阶梯或风险节奏开始。"
@@ -1013,7 +871,8 @@ struct BigFishFormalAssetContext {
const BIG_FISH_TEXT_TO_IMAGE_MODEL: &str = "wan2.2-t2i-flash";
const BIG_FISH_ENTITY_KIND: &str = "big_fish_session";
const BIG_FISH_DEFAULT_NEGATIVE_PROMPT: &str = "文字水印logoUI界面对话框边框多余肢体畸形鱼体低清晰度模糊压缩噪点现代摄影棚写实照片背景";
const BIG_FISH_DEFAULT_NEGATIVE_PROMPT: &str = "文字水印logoUI界面对话框边框多余肢体畸形鱼体低清晰度模糊压缩噪点现代摄影棚写实照片背景,复杂背景";
const BIG_FISH_TRANSPARENT_ASSET_NEGATIVE_PROMPT: &str = "文字水印logoUI界面对话框边框多余肢体畸形鱼体低清晰度模糊压缩噪点现代摄影棚写实照片背景场景背景水草背景气泡背景多只主体阴影地面";
async fn generate_big_fish_formal_asset(
state: &AppState,
@@ -1087,7 +946,7 @@ fn build_big_fish_formal_asset_context(
Ok(BigFishFormalAssetContext {
entity_id: session.session_id.clone(),
prompt: build_big_fish_level_main_image_prompt(draft, level),
negative_prompt: BIG_FISH_DEFAULT_NEGATIVE_PROMPT.to_string(),
negative_prompt: BIG_FISH_TRANSPARENT_ASSET_NEGATIVE_PROMPT.to_string(),
size: "1024*1024".to_string(),
asset_object_kind: "big_fish_level_main_image".to_string(),
binding_slot: format!("level_main_image:{level_part}"),
@@ -1114,7 +973,7 @@ fn build_big_fish_formal_asset_context(
Ok(BigFishFormalAssetContext {
entity_id: session.session_id.clone(),
prompt: build_big_fish_level_motion_prompt(draft, level, motion_key),
negative_prompt: BIG_FISH_DEFAULT_NEGATIVE_PROMPT.to_string(),
negative_prompt: BIG_FISH_TRANSPARENT_ASSET_NEGATIVE_PROMPT.to_string(),
size: "1024*1024".to_string(),
asset_object_kind: "big_fish_level_motion".to_string(),
binding_slot: format!("level_motion:{level_part}:{motion_key}"),
@@ -1190,8 +1049,8 @@ fn build_big_fish_level_main_image_prompt(
),
format!("轮廓方向:{}", level.silhouette_direction),
format!("视觉提示词种子:{}", level.visual_prompt_seed),
"画面要求:单体游戏生物完整入镜,轮廓清晰,适合作为大鱼吃小鱼等级角色主2D 高完成度游戏插画,深海发光质感,中央构图".to_string(),
"不要出现 UI、文字、logo、水印、对话框或边框背景保持干净的深海渐变或透明感,不要出现多只主体。".to_string(),
"画面要求:按 RPG 角色资产口径生成,单体鱼形游戏生物完整入镜,轮廓清晰,中心构2D 高完成度游戏插画,深海发光质感。".to_string(),
"背景要求:透明背景 PNG 风格,不出现任何场景、水草、气泡、阴影地面、UI、文字、logo、水印、对话框或边框不要出现多只主体。".to_string(),
]
.join("")
}
@@ -1217,8 +1076,8 @@ fn build_big_fish_level_motion_prompt(
),
format!("动作提示词种子:{}", level.motion_prompt_seed),
format!("动作要求:{motion_text}"),
"画面要求:单体生物完整入镜轮廓清晰动作方向明确2D 高完成度游戏插画,适合作为 Big Fish 动作槽位的静态 keyframe。".to_string(),
"不要出现 UI、文字、logo、水印、对话框或边框不要生成序列帧拼图不要出现多只主体。".to_string(),
"画面要求:按 RPG 角色动画资产口径生成,单体鱼形生物完整入镜轮廓清晰动作方向明确2D 高完成度游戏插画,适合作为 Big Fish 动作槽位的静态 keyframe。".to_string(),
"背景要求:透明背景 PNG 风格,不出现任何场景、水草、气泡、阴影地面、UI、文字、logo、水印、对话框或边框不要生成序列帧拼图不要出现多只主体。".to_string(),
]
.join("")
}
@@ -1238,8 +1097,8 @@ fn build_big_fish_stage_background_prompt(draft: &BigFishGameDraftRecord) -> Str
format!("安全操作区:{}", background.safe_play_area_hint),
format!("出生边缘:{}", background.spawn_edge_hint),
format!("背景提示词种子:{}", background.background_prompt_seed),
"画面要求:竖屏 9:16中央 70% 保持清爽可读,边缘有深海生态层次和微弱生物光,适合作为大鱼吃小鱼运行态背景".to_string(),
"不要出现 UI、文字、logo、水印、对话框边框或巨大主体遮挡;不要把中央操作区画得过暗或过复杂。".to_string(),
"画面要求:竖屏 9:16大场地,全屏运行态背景,中央 80% 保持开阔清爽,边缘只保留少量出生区环境提示".to_string(),
"元素要求:整体元素少,不出现大型主体、密集装饰、鱼群主角、UI、文字、logo、水印、对话框边框;不要把中央操作区画得过暗或过复杂。".to_string(),
]
.join("")
}
@@ -1769,8 +1628,7 @@ fn big_fish_sse_error_event_message(message: String) -> Event {
fn map_big_fish_client_error(error: SpacetimeClientError) -> AppError {
let status = match &error {
SpacetimeClientError::Procedure(message)
if message.contains("big_fish_creation_session 不存在")
|| message.contains("big_fish_runtime_run 不存在") =>
if message.contains("big_fish_creation_session 不存在") =>
{
StatusCode::NOT_FOUND
}

View File

@@ -9,17 +9,12 @@ pub const BIG_FISH_SESSION_ID_PREFIX: &str = "big-fish-session-";
pub const BIG_FISH_MESSAGE_ID_PREFIX: &str = "big-fish-message-";
pub const BIG_FISH_OPERATION_ID_PREFIX: &str = "big-fish-operation-";
pub const BIG_FISH_ASSET_SLOT_ID_PREFIX: &str = "big-fish-asset-";
pub const BIG_FISH_RUN_ID_PREFIX: &str = "big-fish-run-";
pub const BIG_FISH_DEFAULT_LEVEL_COUNT: u32 = 8;
pub const BIG_FISH_MIN_LEVEL_COUNT: u32 = 6;
pub const BIG_FISH_MAX_LEVEL_COUNT: u32 = 12;
pub const BIG_FISH_MERGE_COUNT_PER_UPGRADE: u32 = 3;
pub const BIG_FISH_OFFSCREEN_CULL_SECONDS: f32 = 3.0;
pub const BIG_FISH_TARGET_WILD_COUNT: usize = 12;
pub const BIG_FISH_VIEW_WIDTH: f32 = 720.0;
pub const BIG_FISH_VIEW_HEIGHT: f32 = 1280.0;
pub const BIG_FISH_WORLD_HALF_WIDTH: f32 = 900.0;
pub const BIG_FISH_WORLD_HALF_HEIGHT: f32 = 1600.0;
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
@@ -72,14 +67,6 @@ pub enum BigFishAssetStatus {
Ready,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum BigFishRunStatus {
Running,
Won,
Failed,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct BigFishAnchorItem {
@@ -209,41 +196,6 @@ pub struct BigFishSessionSnapshot {
pub updated_at_micros: i64,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigFishVector2 {
pub x: f32,
pub y: f32,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigFishRuntimeEntity {
pub entity_id: String,
pub level: u32,
pub position: BigFishVector2,
pub radius: f32,
pub offscreen_seconds: f32,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigFishRuntimeSnapshot {
pub run_id: String,
pub session_id: String,
pub status: BigFishRunStatus,
pub tick: u64,
pub player_level: u32,
pub win_level: u32,
pub leader_entity_id: Option<String>,
pub owned_entities: Vec<BigFishRuntimeEntity>,
pub wild_entities: Vec<BigFishRuntimeEntity>,
pub camera_center: BigFishVector2,
pub last_input: BigFishVector2,
pub event_log: Vec<String>,
pub updated_at_micros: i64,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigFishSessionProcedureResult {
@@ -293,14 +245,6 @@ pub struct BigFishWorksProcedureResult {
pub error_message: Option<String>,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigFishRunProcedureResult {
pub ok: bool,
pub run: Option<BigFishRuntimeSnapshot>,
pub error_message: Option<String>,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct BigFishSessionCreateInput {
@@ -372,43 +316,15 @@ pub struct BigFishPublishInput {
pub published_at_micros: i64,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct BigFishRunStartInput {
pub run_id: String,
pub session_id: String,
pub owner_user_id: String,
pub started_at_micros: i64,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BigFishRunInputSubmitInput {
pub run_id: String,
pub owner_user_id: String,
pub input_x: f32,
pub input_y: f32,
pub submitted_at_micros: i64,
}
#[cfg_attr(feature = "spacetime-types", derive(SpacetimeType))]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct BigFishRunGetInput {
pub run_id: String,
pub owner_user_id: String,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BigFishFieldError {
MissingSessionId,
MissingOwnerUserId,
MissingMessageId,
MissingMessageText,
MissingRunId,
MissingDraft,
InvalidLevel,
InvalidAssetKind,
InvalidRunState,
}
impl BigFishCreationStage {
@@ -474,16 +390,6 @@ impl BigFishAssetStatus {
}
}
impl BigFishRunStatus {
pub fn as_str(self) -> &'static str {
match self {
Self::Running => "running",
Self::Won => "won",
Self::Failed => "failed",
}
}
}
pub fn empty_anchor_pack() -> BigFishAnchorPack {
BigFishAnchorPack {
gameplay_promise: BigFishAnchorItem {
@@ -565,12 +471,14 @@ pub fn compile_default_draft(anchor_pack: &BigFishAnchorPack) -> BigFishGameDraf
background: BigFishBackgroundBlueprint {
theme: theme.clone(),
color_mood: "深蓝、青绿、带少量暖色生物光".to_string(),
foreground_hints: "轻微漂浮颗粒和边缘水草,不遮挡中央操作区".to_string(),
midground_composition: "中央留出清晰活动区域,边缘出生缓冲层".to_string(),
background_depth: "纵深水域与远处体型剪影".to_string(),
safe_play_area_hint: "9:16 竖屏中央 70% 为主要活动区".to_string(),
spawn_edge_hint: "四周边缘作为野生实体出生区".to_string(),
background_prompt_seed: format!("{theme},竖屏 9:16全屏游戏背景无文字无 UI 框"),
foreground_hints: "只保留少量漂浮颗粒和边缘水草,不遮挡中央操作区".to_string(),
midground_composition: "中央留出大面积清晰活动区域,边缘只做出生缓冲层".to_string(),
background_depth: "简洁纵深水域与极少量远处剪影".to_string(),
safe_play_area_hint: "9:16 竖屏中央 80% 为主要活动区".to_string(),
spawn_edge_hint: "四周边缘以少量暗礁或水草提示野生实体出生区".to_string(),
background_prompt_seed: format!(
"{theme},竖屏 9:16全屏大场地游戏背景元素少中央开阔无文字无 UI 框"
),
},
runtime_params: BigFishRuntimeParams {
level_count,
@@ -673,77 +581,6 @@ pub fn build_generated_asset_slot(
})
}
pub fn build_initial_runtime_snapshot(
run_id: String,
session_id: String,
draft: &BigFishGameDraft,
now_micros: i64,
) -> BigFishRuntimeSnapshot {
let mut snapshot = BigFishRuntimeSnapshot {
run_id,
session_id,
status: BigFishRunStatus::Running,
tick: 0,
player_level: 1,
win_level: draft.runtime_params.win_level,
leader_entity_id: Some("owned-1".to_string()),
owned_entities: vec![BigFishRuntimeEntity {
entity_id: "owned-1".to_string(),
level: 1,
position: BigFishVector2 { x: 0.0, y: 0.0 },
radius: entity_radius(1),
offscreen_seconds: 0.0,
}],
wild_entities: vec![
BigFishRuntimeEntity {
entity_id: "wild-open-1".to_string(),
level: 1,
position: BigFishVector2 { x: 72.0, y: 0.0 },
radius: entity_radius(1),
offscreen_seconds: 0.0,
},
BigFishRuntimeEntity {
entity_id: "wild-open-2".to_string(),
level: 1,
position: BigFishVector2 { x: -88.0, y: 30.0 },
radius: entity_radius(1),
offscreen_seconds: 0.0,
},
],
camera_center: BigFishVector2 { x: 0.0, y: 0.0 },
last_input: BigFishVector2 { x: 0.0, y: 0.0 },
event_log: vec!["开局生成 2 个同级可收编目标".to_string()],
updated_at_micros: now_micros,
};
maintain_wild_pool(&mut snapshot, &draft.runtime_params);
snapshot
}
pub fn advance_runtime_snapshot(
mut snapshot: BigFishRuntimeSnapshot,
params: &BigFishRuntimeParams,
input_x: f32,
input_y: f32,
now_micros: i64,
) -> BigFishRuntimeSnapshot {
if snapshot.status != BigFishRunStatus::Running {
return snapshot;
}
let step_seconds = resolve_step_seconds(&snapshot, now_micros);
snapshot.tick = snapshot.tick.saturating_add(1);
snapshot.last_input = normalize_input(input_x, input_y);
move_owned_entities(&mut snapshot, params, step_seconds);
resolve_collisions(&mut snapshot, params);
apply_chain_merges(&mut snapshot, params);
refresh_player_leader(&mut snapshot);
apply_win_or_fail(&mut snapshot, params);
update_wild_culling(&mut snapshot, params, step_seconds);
maintain_wild_pool(&mut snapshot, params);
snapshot.updated_at_micros = now_micros;
snapshot
}
pub fn validate_session_get_input(input: &BigFishSessionGetInput) -> Result<(), BigFishFieldError> {
validate_session_owner(&input.session_id, &input.owner_user_id)
}
@@ -817,36 +654,6 @@ pub fn validate_publish_input(input: &BigFishPublishInput) -> Result<(), BigFish
validate_session_owner(&input.session_id, &input.owner_user_id)
}
pub fn validate_run_start_input(input: &BigFishRunStartInput) -> Result<(), BigFishFieldError> {
validate_session_owner(&input.session_id, &input.owner_user_id)?;
if normalize_required_string(&input.run_id).is_none() {
return Err(BigFishFieldError::MissingRunId);
}
Ok(())
}
pub fn validate_run_get_input(input: &BigFishRunGetInput) -> Result<(), BigFishFieldError> {
if normalize_required_string(&input.run_id).is_none() {
return Err(BigFishFieldError::MissingRunId);
}
if normalize_required_string(&input.owner_user_id).is_none() {
return Err(BigFishFieldError::MissingOwnerUserId);
}
Ok(())
}
pub fn validate_run_input_submit_input(
input: &BigFishRunInputSubmitInput,
) -> Result<(), BigFishFieldError> {
if normalize_required_string(&input.run_id).is_none() {
return Err(BigFishFieldError::MissingRunId);
}
if normalize_required_string(&input.owner_user_id).is_none() {
return Err(BigFishFieldError::MissingOwnerUserId);
}
Ok(())
}
pub fn serialize_anchor_pack(anchor_pack: &BigFishAnchorPack) -> Result<String, serde_json::Error> {
serde_json::to_string(anchor_pack)
}
@@ -873,18 +680,6 @@ pub fn deserialize_asset_coverage(value: &str) -> Result<BigFishAssetCoverage, s
serde_json::from_str(value)
}
pub fn serialize_runtime_snapshot(
snapshot: &BigFishRuntimeSnapshot,
) -> Result<String, serde_json::Error> {
serde_json::to_string(snapshot)
}
pub fn deserialize_runtime_snapshot(
value: &str,
) -> Result<BigFishRuntimeSnapshot, serde_json::Error> {
serde_json::from_str(value)
}
fn fallback_anchor_value(anchor: &BigFishAnchorItem, fallback: &str) -> String {
normalize_required_string(&anchor.value).unwrap_or_else(|| fallback.to_string())
}
@@ -911,8 +706,12 @@ fn build_level_blueprint(level: u32, level_count: u32, theme: &str) -> BigFishLe
1.0 + level as f32 * 0.22
),
size_ratio: 1.0 + (level.saturating_sub(1) as f32 * 0.22),
visual_prompt_seed: format!("{theme} 第 {level} 级实体主图,透明背景,清晰轮廓"),
motion_prompt_seed: format!("{theme} 第 {level} 级实体 idle_float 与 move_swim 动作"),
visual_prompt_seed: format!(
"{theme} 第 {level} 级鱼形实体主图RPG 角色资产口径,透明背景,单体完整入镜,清晰轮廓"
),
motion_prompt_seed: format!(
"{theme} 第 {level} 级鱼形实体 idle_float 与 move_swim 动作RPG 角色动画资产口径,透明背景"
),
merge_source_level: if level == 1 { None } else { Some(level - 1) },
prey_window,
threat_window,
@@ -945,7 +744,7 @@ fn build_asset_prompt_snapshot(
.ok_or(BigFishFieldError::InvalidLevel)?;
let motion_key = motion_key.ok_or(BigFishFieldError::InvalidAssetKind)?;
Ok(format!(
"{},动作位:{}",
"{},动作位:{},透明背景,单体完整入镜",
blueprint.motion_prompt_seed, motion_key
))
}
@@ -1004,311 +803,6 @@ fn validate_level(level: Option<u32>, draft: &BigFishGameDraft) -> Result<(), Bi
}
}
fn normalize_input(x: f32, y: f32) -> BigFishVector2 {
let length = (x * x + y * y).sqrt();
if length <= 1.0 {
return BigFishVector2 { x, y };
}
BigFishVector2 {
x: x / length,
y: y / length,
}
}
/// 运行态仍由 `POST input` 触发推进,因此“屏外 3 秒”这类规则必须按真实秒数累计,
/// 否则会随着输入频率变化而漂移。
fn resolve_step_seconds(snapshot: &BigFishRuntimeSnapshot, now_micros: i64) -> f32 {
((now_micros - snapshot.updated_at_micros).max(0) as f32) / 1_000_000.0
}
fn move_owned_entities(
snapshot: &mut BigFishRuntimeSnapshot,
params: &BigFishRuntimeParams,
step_seconds: f32,
) {
let input = snapshot.last_input.clone();
if let Some(leader) = snapshot.owned_entities.first_mut() {
leader.position.x = clamp_world(
leader.position.x + input.x * params.leader_move_speed * step_seconds,
true,
);
leader.position.y = clamp_world(
leader.position.y + input.y * params.leader_move_speed * step_seconds,
false,
);
snapshot.camera_center = leader.position.clone();
}
let leader_position = snapshot.camera_center.clone();
for (index, follower) in snapshot.owned_entities.iter_mut().enumerate().skip(1) {
let slot_offset = ((index as f32) * 0.7).sin() * 36.0;
let target = BigFishVector2 {
x: leader_position.x - 42.0 - index as f32 * 8.0,
y: leader_position.y + slot_offset,
};
let delta_x = target.x - follower.position.x;
let delta_y = target.y - follower.position.y;
let distance = (delta_x * delta_x + delta_y * delta_y).sqrt();
if distance <= f32::EPSILON {
continue;
}
let catch_up_ratio =
(params.follower_catch_up_speed * step_seconds / distance).clamp(0.0, 1.0);
follower.position.x += delta_x * catch_up_ratio;
follower.position.y += delta_y * catch_up_ratio;
}
}
fn resolve_collisions(snapshot: &mut BigFishRuntimeSnapshot, _params: &BigFishRuntimeParams) {
let mut owned_to_remove = Vec::new();
let mut wild_to_remove = Vec::new();
let mut newly_owned = Vec::new();
for (owned_index, owned) in snapshot.owned_entities.iter().enumerate() {
for (wild_index, wild) in snapshot.wild_entities.iter().enumerate() {
if wild_to_remove.contains(&wild_index) || owned_to_remove.contains(&owned_index) {
continue;
}
if distance(&owned.position, &wild.position) > owned.radius + wild.radius {
continue;
}
if owned.level >= wild.level {
wild_to_remove.push(wild_index);
newly_owned.push(BigFishRuntimeEntity {
entity_id: format!("owned-from-{}-{}", wild.entity_id, snapshot.tick),
level: wild.level,
position: wild.position.clone(),
radius: entity_radius(wild.level),
offscreen_seconds: 0.0,
});
snapshot
.event_log
.push(format!("收编 {} 级实体", wild.level));
} else {
owned_to_remove.push(owned_index);
snapshot.event_log.push(format!(
"{} 级己方实体被 {} 级野生实体吃掉",
owned.level, wild.level
));
}
}
}
remove_indices(&mut snapshot.wild_entities, &wild_to_remove);
remove_indices(&mut snapshot.owned_entities, &owned_to_remove);
snapshot.owned_entities.extend(newly_owned);
}
fn apply_chain_merges(snapshot: &mut BigFishRuntimeSnapshot, params: &BigFishRuntimeParams) {
loop {
let mut merged = false;
for level in 1..params.win_level {
let indices = snapshot
.owned_entities
.iter()
.enumerate()
.filter_map(|(index, entity)| (entity.level == level).then_some(index))
.take(params.merge_count_per_upgrade as usize)
.collect::<Vec<_>>();
if indices.len() < params.merge_count_per_upgrade as usize {
continue;
}
let center = average_position(&indices, &snapshot.owned_entities);
remove_indices(&mut snapshot.owned_entities, &indices);
snapshot.owned_entities.push(BigFishRuntimeEntity {
entity_id: format!("owned-merge-{}-{}", level + 1, snapshot.tick),
level: level + 1,
position: center,
radius: entity_radius(level + 1),
offscreen_seconds: 0.0,
});
snapshot
.event_log
.push(format!("3 个 {} 级实体合成 {}", level, level + 1));
merged = true;
break;
}
if !merged {
break;
}
}
}
fn refresh_player_leader(snapshot: &mut BigFishRuntimeSnapshot) {
snapshot.owned_entities.sort_by(|left, right| {
right
.level
.cmp(&left.level)
.then_with(|| {
distance(&left.position, &snapshot.camera_center)
.partial_cmp(&distance(&right.position, &snapshot.camera_center))
.unwrap_or(std::cmp::Ordering::Equal)
})
.then_with(|| left.entity_id.cmp(&right.entity_id))
});
snapshot.leader_entity_id = snapshot
.owned_entities
.first()
.map(|entity| entity.entity_id.clone());
snapshot.player_level = snapshot
.owned_entities
.iter()
.map(|entity| entity.level)
.max()
.unwrap_or(0);
if let Some(leader) = snapshot.owned_entities.first() {
snapshot.camera_center = leader.position.clone();
}
}
fn apply_win_or_fail(snapshot: &mut BigFishRuntimeSnapshot, params: &BigFishRuntimeParams) {
if snapshot.owned_entities.is_empty() {
snapshot.status = BigFishRunStatus::Failed;
snapshot
.event_log
.push("己方实体归零,本局失败".to_string());
return;
}
if snapshot.player_level >= params.win_level {
snapshot.status = BigFishRunStatus::Won;
snapshot
.event_log
.push("获得最高等级实体,通关".to_string());
}
}
fn update_wild_culling(
snapshot: &mut BigFishRuntimeSnapshot,
params: &BigFishRuntimeParams,
step_seconds: f32,
) {
let player_level = snapshot.player_level;
for wild in &mut snapshot.wild_entities {
let should_cull_level = wild.level == player_level
|| wild.level >= player_level.saturating_add(3)
|| wild.level.saturating_add(3) <= player_level;
if !should_cull_level {
wild.offscreen_seconds = 0.0;
continue;
}
if is_offscreen(&wild.position, &snapshot.camera_center, wild.radius) {
wild.offscreen_seconds += step_seconds;
} else {
wild.offscreen_seconds = 0.0;
}
}
snapshot
.wild_entities
.retain(|wild| wild.offscreen_seconds < params.offscreen_cull_seconds);
}
fn maintain_wild_pool(snapshot: &mut BigFishRuntimeSnapshot, params: &BigFishRuntimeParams) {
if snapshot.status != BigFishRunStatus::Running {
return;
}
let mut next_index = snapshot.wild_entities.len() + snapshot.tick as usize;
while snapshot.wild_entities.len() < params.spawn_target_count as usize {
let level = next_spawn_level(snapshot.player_level.max(1), params.win_level, next_index);
snapshot.wild_entities.push(BigFishRuntimeEntity {
entity_id: format!("wild-{}-{}", snapshot.tick, next_index),
level,
position: spawn_position(&snapshot.camera_center, next_index),
radius: entity_radius(level),
offscreen_seconds: 0.0,
});
next_index += 1;
}
}
fn next_spawn_level(player_level: u32, win_level: u32, index: usize) -> u32 {
if player_level == 1 && index % 4 < 2 {
return 1;
}
let deltas = [-2_i32, -1, 1, 2];
let delta = deltas[index % deltas.len()];
(player_level as i32 + delta).clamp(1, win_level as i32) as u32
}
fn spawn_position(center: &BigFishVector2, index: usize) -> BigFishVector2 {
let side = index % 4;
let offset = ((index as f32 * 37.0) % 420.0) - 210.0;
match side {
0 => BigFishVector2 {
x: center.x - BIG_FISH_VIEW_WIDTH * 0.62,
y: center.y + offset,
},
1 => BigFishVector2 {
x: center.x + BIG_FISH_VIEW_WIDTH * 0.62,
y: center.y + offset,
},
2 => BigFishVector2 {
x: center.x + offset,
y: center.y - BIG_FISH_VIEW_HEIGHT * 0.58,
},
_ => BigFishVector2 {
x: center.x + offset,
y: center.y + BIG_FISH_VIEW_HEIGHT * 0.58,
},
}
}
fn remove_indices<T>(items: &mut Vec<T>, indices: &[usize]) {
let mut sorted = indices.to_vec();
sorted.sort_unstable();
sorted.dedup();
for index in sorted.into_iter().rev() {
if index < items.len() {
items.remove(index);
}
}
}
fn average_position(indices: &[usize], entities: &[BigFishRuntimeEntity]) -> BigFishVector2 {
let mut x = 0.0;
let mut y = 0.0;
for index in indices {
x += entities[*index].position.x;
y += entities[*index].position.y;
}
let count = indices.len().max(1) as f32;
BigFishVector2 {
x: x / count,
y: y / count,
}
}
fn distance(left: &BigFishVector2, right: &BigFishVector2) -> f32 {
let dx = left.x - right.x;
let dy = left.y - right.y;
(dx * dx + dy * dy).sqrt()
}
fn is_offscreen(position: &BigFishVector2, camera: &BigFishVector2, radius: f32) -> bool {
let half_w = BIG_FISH_VIEW_WIDTH / 2.0;
let half_h = BIG_FISH_VIEW_HEIGHT / 2.0;
position.x + radius < camera.x - half_w
|| position.x - radius > camera.x + half_w
|| position.y + radius < camera.y - half_h
|| position.y - radius > camera.y + half_h
}
fn clamp_world(value: f32, horizontal: bool) -> f32 {
let limit = if horizontal {
BIG_FISH_WORLD_HALF_WIDTH
} else {
BIG_FISH_WORLD_HALF_HEIGHT
};
value.clamp(-limit, limit)
}
fn entity_radius(level: u32) -> f32 {
18.0 + level as f32 * 4.0
}
impl fmt::Display for BigFishFieldError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
@@ -1316,11 +810,9 @@ impl fmt::Display for BigFishFieldError {
Self::MissingOwnerUserId => f.write_str("big_fish.owner_user_id 不能为空"),
Self::MissingMessageId => f.write_str("big_fish.message_id 不能为空"),
Self::MissingMessageText => f.write_str("big_fish.message_text 不能为空"),
Self::MissingRunId => f.write_str("big_fish.run_id 不能为空"),
Self::MissingDraft => f.write_str("big_fish.draft 尚未编译"),
Self::InvalidLevel => f.write_str("big_fish.level 不在合法等级范围内"),
Self::InvalidAssetKind => f.write_str("big_fish.asset_kind 或动作位非法"),
Self::InvalidRunState => f.write_str("big_fish.run 当前状态不允许推进"),
}
}
}
@@ -1370,123 +862,4 @@ mod tests {
assert!(coverage.blockers.iter().any(|item| item.contains("背景图")));
}
#[test]
fn same_level_wild_entity_can_be_collected_at_start() {
    // Put a same-level wild entity right beside the leader and verify a
    // single tick absorbs it into the owned group and logs the pickup.
    let draft = compile_default_draft(&infer_anchor_pack("深海", None));
    let mut snapshot =
        build_initial_runtime_snapshot("run-1".to_string(), "session-1".to_string(), &draft, 1);
    snapshot.wild_entities[0].position = BigFishVector2 { x: 1.0, y: 0.0 };

    let advanced = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 2);

    assert!(advanced.owned_entities.len() >= 2);
    let pickup_logged = advanced
        .event_log
        .iter()
        .any(|entry| entry.contains("收编 1 级实体"));
    assert!(pickup_logged);
}
#[test]
fn three_owned_entities_merge_into_next_level() {
    // Three clustered level-1 owned entities should merge so that at least
    // one level-2 entity exists after a tick.
    let draft = compile_default_draft(&infer_anchor_pack("深海", None));
    let mut snapshot = build_initial_runtime_snapshot(
        "run-merge".to_string(),
        "session-merge".to_string(),
        &draft,
        1,
    );
    snapshot.wild_entities.clear();
    for (entity_id, x) in [("owned-2", 4.0_f32), ("owned-3", 8.0_f32)] {
        snapshot.owned_entities.push(BigFishRuntimeEntity {
            entity_id: entity_id.to_string(),
            level: 1,
            position: BigFishVector2 { x, y: 0.0 },
            radius: entity_radius(1),
            offscreen_seconds: 0.0,
        });
    }

    let advanced = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 2);

    assert!(advanced.owned_entities.iter().any(|entity| entity.level == 2));
}
#[test]
fn final_level_immediately_wins() {
    // Setting the leader to the configured win level should flip the run
    // status to Won on the very next tick.
    let draft = compile_default_draft(&infer_anchor_pack("深海", None));
    let mut snapshot = build_initial_runtime_snapshot(
        "run-win".to_string(),
        "session-win".to_string(),
        &draft,
        1,
    );
    snapshot.owned_entities[0].level = draft.runtime_params.win_level;

    let advanced = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 2);

    assert_eq!(advanced.status, BigFishRunStatus::Won);
}
#[test]
fn offscreen_same_level_wild_entity_is_removed_after_three_seconds() {
    // An off-screen wild entity whose accumulated off-screen time crosses
    // the three-second threshold during the tick must be culled.
    let draft = compile_default_draft(&infer_anchor_pack("深海", None));
    let mut snapshot = build_initial_runtime_snapshot(
        "run-cull".to_string(),
        "session-cull".to_string(),
        &draft,
        1,
    );
    snapshot.wild_entities.clear();
    snapshot.wild_entities.push(BigFishRuntimeEntity {
        entity_id: "wild-cull".to_string(),
        level: 1,
        position: BigFishVector2 { x: 1000.0, y: 0.0 },
        radius: entity_radius(1),
        offscreen_seconds: 2.8,
    });
    snapshot.updated_at_micros = 1_000_000;

    // A 0.25 s tick (1_000_000 -> 1_250_000 micros) pushes 2.8 s past 3 s.
    let advanced = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 1_250_000);

    let still_present = advanced
        .wild_entities
        .iter()
        .any(|entity| entity.entity_id == "wild-cull");
    assert!(!still_present);
}
#[test]
fn offscreen_same_level_wild_entity_is_kept_before_three_seconds_elapsed() {
    // An off-screen wild entity still under the three-second cull threshold
    // after the tick must survive in the snapshot.
    let draft = compile_default_draft(&infer_anchor_pack("深海", None));
    let mut snapshot = build_initial_runtime_snapshot(
        "run-cull-safe".to_string(),
        "session-cull-safe".to_string(),
        &draft,
        1,
    );
    snapshot.wild_entities.clear();
    snapshot.wild_entities.push(BigFishRuntimeEntity {
        entity_id: "wild-cull-safe".to_string(),
        level: 1,
        position: BigFishVector2 { x: 1000.0, y: 0.0 },
        radius: entity_radius(1),
        offscreen_seconds: 2.7,
    });
    snapshot.updated_at_micros = 1_000_000;

    // A 0.2 s tick (1_000_000 -> 1_200_000 micros) keeps 2.7 s below 3 s.
    let advanced = advance_runtime_snapshot(snapshot, &draft.runtime_params, 0.0, 0.0, 1_200_000);

    let still_present = advanced
        .wild_entities
        .iter()
        .any(|entity| entity.entity_id == "wild-cull-safe");
    assert!(still_present);
}
}

View File

@@ -215,7 +215,7 @@ mod tests {
use serde_json::json;
#[test]
fn available_login_methods_keep_phone_then_wechat_order() {
fn available_login_methods_keep_phone_password_wechat_order() {
let methods = build_available_login_methods(true, true, true);
assert_eq!(
@@ -228,6 +228,13 @@ mod tests {
);
}
#[test]
fn available_login_methods_keep_password_as_default_entry() {
let methods = build_available_login_methods(false, true, false);
assert_eq!(methods, vec![AUTH_LOGIN_METHOD_PASSWORD.to_string()]);
}
#[test]
fn password_entry_request_uses_camel_case_fields() {
let payload = serde_json::to_value(PasswordEntryRequest {

View File

@@ -26,13 +26,6 @@ pub struct ExecuteBigFishActionRequest {
pub motion_key: Option<String>,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct SubmitBigFishInputRequest {
pub x: f32,
pub y: f32,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct BigFishAnchorItemResponse {
@@ -169,47 +162,6 @@ pub struct BigFishActionResponse {
pub session: BigFishSessionSnapshotResponse,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct BigFishVector2Response {
pub x: f32,
pub y: f32,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct BigFishRuntimeEntityResponse {
pub entity_id: String,
pub level: u32,
pub position: BigFishVector2Response,
pub radius: f32,
pub offscreen_seconds: f32,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct BigFishRuntimeSnapshotResponse {
pub run_id: String,
pub session_id: String,
pub status: String,
pub tick: u64,
pub player_level: u32,
pub win_level: u32,
pub leader_entity_id: Option<String>,
pub owned_entities: Vec<BigFishRuntimeEntityResponse>,
pub wild_entities: Vec<BigFishRuntimeEntityResponse>,
pub camera_center: BigFishVector2Response,
pub last_input: BigFishVector2Response,
pub event_log: Vec<String>,
pub updated_at: String,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct BigFishRunResponse {
pub run: BigFishRuntimeSnapshotResponse,
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -251,76 +251,4 @@ impl SpacetimeClient {
.await
}
pub async fn start_big_fish_run(
&self,
input: BigFishRunStartRecordInput,
) -> Result<BigFishRuntimeRecord, SpacetimeClientError> {
let procedure_input = BigFishRunStartInput {
run_id: input.run_id,
session_id: input.session_id,
owner_user_id: input.owner_user_id,
started_at_micros: input.started_at_micros,
};
self.call_after_connect(move |connection, sender| {
connection
.procedures()
.start_big_fish_run_then(procedure_input, move |_, result| {
let mapped = result
.map_err(|error| SpacetimeClientError::Procedure(error.to_string()))
.and_then(map_big_fish_run_procedure_result);
send_once(&sender, mapped);
});
})
.await
}
pub async fn submit_big_fish_input(
&self,
input: BigFishRunInputSubmitRecordInput,
) -> Result<BigFishRuntimeRecord, SpacetimeClientError> {
let procedure_input = BigFishRunInputSubmitInput {
run_id: input.run_id,
owner_user_id: input.owner_user_id,
input_x: input.input_x,
input_y: input.input_y,
submitted_at_micros: input.submitted_at_micros,
};
self.call_after_connect(move |connection, sender| {
connection.procedures().submit_big_fish_input_then(
procedure_input,
move |_, result| {
let mapped = result
.map_err(|error| SpacetimeClientError::Procedure(error.to_string()))
.and_then(map_big_fish_run_procedure_result);
send_once(&sender, mapped);
},
);
})
.await
}
pub async fn get_big_fish_run(
&self,
run_id: String,
owner_user_id: String,
) -> Result<BigFishRuntimeRecord, SpacetimeClientError> {
let procedure_input = BigFishRunGetInput {
run_id,
owner_user_id,
};
self.call_after_connect(move |connection, sender| {
connection
.procedures()
.get_big_fish_run_then(procedure_input, move |_, result| {
let mapped = result
.map_err(|error| SpacetimeClientError::Procedure(error.to_string()))
.and_then(map_big_fish_run_procedure_result);
send_once(&sender, mapped);
});
})
.await
}
}

View File

@@ -10,10 +10,8 @@ pub use mapper::{
BigFishAnchorPackRecord, BigFishAssetCoverageRecord, BigFishAssetGenerateRecordInput,
BigFishAssetSlotRecord, BigFishBackgroundBlueprintRecord, BigFishGameDraftRecord,
BigFishLevelBlueprintRecord, BigFishMessageFinalizeRecordInput,
BigFishMessageSubmitRecordInput, BigFishRunInputSubmitRecordInput, BigFishRunStartRecordInput,
BigFishRuntimeEntityRecord, BigFishRuntimeParamsRecord, BigFishRuntimeRecord,
BigFishSessionCreateRecordInput, BigFishSessionRecord, BigFishVector2Record,
BigFishWorkSummaryRecord, CustomWorldAgentActionExecuteRecord,
BigFishMessageSubmitRecordInput, BigFishRuntimeParamsRecord, BigFishSessionCreateRecordInput,
BigFishSessionRecord, BigFishWorkSummaryRecord, CustomWorldAgentActionExecuteRecord,
CustomWorldAgentActionExecuteRecordInput, CustomWorldAgentCheckpointRecord,
CustomWorldAgentMessageFinalizeRecordInput, CustomWorldAgentMessageRecord,
CustomWorldAgentMessageSubmitRecordInput, CustomWorldAgentOperationProgressRecordInput,

View File

@@ -1262,26 +1262,6 @@ pub(crate) fn map_big_fish_works_procedure_result(
})
}
pub(crate) fn map_big_fish_run_procedure_result(
result: BigFishRunProcedureResult,
) -> Result<BigFishRuntimeRecord, SpacetimeClientError> {
if !result.ok {
return Err(SpacetimeClientError::Procedure(
result
.error_message
.unwrap_or_else(|| "SpacetimeDB procedure 返回未知错误".to_string()),
));
}
let run = result.run.ok_or_else(|| {
SpacetimeClientError::Procedure(
"SpacetimeDB procedure 未返回 big fish runtime 快照".to_string(),
)
})?;
Ok(map_big_fish_runtime_snapshot(run))
}
pub(crate) fn map_story_session_procedure_result(
result: StorySessionProcedureResult,
) -> Result<StorySessionResultRecord, SpacetimeClientError> {
@@ -2492,53 +2472,6 @@ pub(crate) fn map_big_fish_agent_message_snapshot(
}
}
pub(crate) fn map_big_fish_runtime_snapshot(
snapshot: BigFishRuntimeSnapshot,
) -> BigFishRuntimeRecord {
BigFishRuntimeRecord {
run_id: snapshot.run_id,
session_id: snapshot.session_id,
status: format_big_fish_run_status(snapshot.status).to_string(),
tick: snapshot.tick,
player_level: snapshot.player_level,
win_level: snapshot.win_level,
leader_entity_id: snapshot.leader_entity_id,
owned_entities: snapshot
.owned_entities
.into_iter()
.map(map_big_fish_runtime_entity)
.collect(),
wild_entities: snapshot
.wild_entities
.into_iter()
.map(map_big_fish_runtime_entity)
.collect(),
camera_center: map_big_fish_vector2(snapshot.camera_center),
last_input: map_big_fish_vector2(snapshot.last_input),
event_log: snapshot.event_log,
updated_at: format_timestamp_micros(snapshot.updated_at_micros),
}
}
pub(crate) fn map_big_fish_runtime_entity(
snapshot: BigFishRuntimeEntity,
) -> BigFishRuntimeEntityRecord {
BigFishRuntimeEntityRecord {
entity_id: snapshot.entity_id,
level: snapshot.level,
position: map_big_fish_vector2(snapshot.position),
radius: snapshot.radius,
offscreen_seconds: snapshot.offscreen_seconds,
}
}
pub(crate) fn map_big_fish_vector2(snapshot: BigFishVector2) -> BigFishVector2Record {
BigFishVector2Record {
x: snapshot.x,
y: snapshot.y,
}
}
pub(crate) fn map_story_session_snapshot(snapshot: StorySessionSnapshot) -> StorySessionRecord {
StorySessionRecord {
story_session_id: snapshot.story_session_id,
@@ -3244,14 +3177,6 @@ pub(crate) fn format_big_fish_asset_status(value: BigFishAssetStatus) -> &'stati
}
}
pub(crate) fn format_big_fish_run_status(value: BigFishRunStatus) -> &'static str {
match value {
BigFishRunStatus::Running => "running",
BigFishRunStatus::Won => "won",
BigFishRunStatus::Failed => "failed",
}
}
pub(crate) fn format_custom_world_theme_mode(value: DomainCustomWorldThemeMode) -> &'static str {
match value {
DomainCustomWorldThemeMode::Martial => "martial",
@@ -4503,23 +4428,6 @@ pub struct BigFishAssetGenerateRecordInput {
pub generated_at_micros: i64,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct BigFishRunStartRecordInput {
pub run_id: String,
pub session_id: String,
pub owner_user_id: String,
pub started_at_micros: i64,
}
#[derive(Clone, Debug, PartialEq)]
pub struct BigFishRunInputSubmitRecordInput {
pub run_id: String,
pub owner_user_id: String,
pub input_x: f32,
pub input_y: f32,
pub submitted_at_micros: i64,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct BigFishAnchorItemRecord {
pub key: String,
@@ -4652,38 +4560,6 @@ pub struct BigFishWorkSummaryRecord {
pub background_ready: bool,
}
#[derive(Clone, Debug, PartialEq)]
pub struct BigFishVector2Record {
pub x: f32,
pub y: f32,
}
#[derive(Clone, Debug, PartialEq)]
pub struct BigFishRuntimeEntityRecord {
pub entity_id: String,
pub level: u32,
pub position: BigFishVector2Record,
pub radius: f32,
pub offscreen_seconds: f32,
}
#[derive(Clone, Debug, PartialEq)]
pub struct BigFishRuntimeRecord {
pub run_id: String,
pub session_id: String,
pub status: String,
pub tick: u64,
pub player_level: u32,
pub win_level: u32,
pub leader_entity_id: Option<String>,
pub owned_entities: Vec<BigFishRuntimeEntityRecord>,
pub wild_entities: Vec<BigFishRuntimeEntityRecord>,
pub camera_center: BigFishVector2Record,
pub last_input: BigFishVector2Record,
pub event_log: Vec<String>,
pub updated_at: String,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ResolveNpcBattleInteractionInput {
pub npc_interaction: DomainResolveNpcInteractionInput,

View File

@@ -0,0 +1,62 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::database_migration_authorize_operator_input_type::DatabaseMigrationAuthorizeOperatorInput;
use super::database_migration_operator_procedure_result_type::DatabaseMigrationOperatorProcedureResult;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct AuthorizeDatabaseMigrationOperatorArgs {
pub input: DatabaseMigrationAuthorizeOperatorInput,
}
impl __sdk::InModule for AuthorizeDatabaseMigrationOperatorArgs {
type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `authorize_database_migration_operator`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait authorize_database_migration_operator {
fn authorize_database_migration_operator(
&self,
input: DatabaseMigrationAuthorizeOperatorInput,
) {
self.authorize_database_migration_operator_then(input, |_, _| {});
}
fn authorize_database_migration_operator_then(
&self,
input: DatabaseMigrationAuthorizeOperatorInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<DatabaseMigrationOperatorProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
);
}
impl authorize_database_migration_operator for super::RemoteProcedures {
fn authorize_database_migration_operator_then(
&self,
input: DatabaseMigrationAuthorizeOperatorInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<DatabaseMigrationOperatorProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
) {
self.imp
.invoke_procedure_with_callback::<_, DatabaseMigrationOperatorProcedureResult>(
"authorize_database_migration_operator",
AuthorizeDatabaseMigrationOperatorArgs { input },
__callback,
);
}
}

View File

@@ -1,19 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishRunInputSubmitInput {
pub run_id: String,
pub owner_user_id: String,
pub input_x: f32,
pub input_y: f32,
pub submitted_at_micros: i64,
}
impl __sdk::InModule for BigFishRunInputSubmitInput {
type Module = super::RemoteModule;
}

View File

@@ -1,82 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::big_fish_run_status_type::BigFishRunStatus;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishRuntimeRun {
pub run_id: String,
pub session_id: String,
pub owner_user_id: String,
pub status: BigFishRunStatus,
pub snapshot_json: String,
pub last_input_x: f32,
pub last_input_y: f32,
pub tick: u64,
pub created_at: __sdk::Timestamp,
pub updated_at: __sdk::Timestamp,
}
impl __sdk::InModule for BigFishRuntimeRun {
type Module = super::RemoteModule;
}
/// Column accessor struct for the table `BigFishRuntimeRun`.
///
/// Provides typed access to columns for query building.
pub struct BigFishRuntimeRunCols {
pub run_id: __sdk::__query_builder::Col<BigFishRuntimeRun, String>,
pub session_id: __sdk::__query_builder::Col<BigFishRuntimeRun, String>,
pub owner_user_id: __sdk::__query_builder::Col<BigFishRuntimeRun, String>,
pub status: __sdk::__query_builder::Col<BigFishRuntimeRun, BigFishRunStatus>,
pub snapshot_json: __sdk::__query_builder::Col<BigFishRuntimeRun, String>,
pub last_input_x: __sdk::__query_builder::Col<BigFishRuntimeRun, f32>,
pub last_input_y: __sdk::__query_builder::Col<BigFishRuntimeRun, f32>,
pub tick: __sdk::__query_builder::Col<BigFishRuntimeRun, u64>,
pub created_at: __sdk::__query_builder::Col<BigFishRuntimeRun, __sdk::Timestamp>,
pub updated_at: __sdk::__query_builder::Col<BigFishRuntimeRun, __sdk::Timestamp>,
}
impl __sdk::__query_builder::HasCols for BigFishRuntimeRun {
type Cols = BigFishRuntimeRunCols;
fn cols(table_name: &'static str) -> Self::Cols {
BigFishRuntimeRunCols {
run_id: __sdk::__query_builder::Col::new(table_name, "run_id"),
session_id: __sdk::__query_builder::Col::new(table_name, "session_id"),
owner_user_id: __sdk::__query_builder::Col::new(table_name, "owner_user_id"),
status: __sdk::__query_builder::Col::new(table_name, "status"),
snapshot_json: __sdk::__query_builder::Col::new(table_name, "snapshot_json"),
last_input_x: __sdk::__query_builder::Col::new(table_name, "last_input_x"),
last_input_y: __sdk::__query_builder::Col::new(table_name, "last_input_y"),
tick: __sdk::__query_builder::Col::new(table_name, "tick"),
created_at: __sdk::__query_builder::Col::new(table_name, "created_at"),
updated_at: __sdk::__query_builder::Col::new(table_name, "updated_at"),
}
}
}
/// Indexed column accessor struct for the table `BigFishRuntimeRun`.
///
/// Provides typed access to indexed columns for query building.
pub struct BigFishRuntimeRunIxCols {
pub owner_user_id: __sdk::__query_builder::IxCol<BigFishRuntimeRun, String>,
pub run_id: __sdk::__query_builder::IxCol<BigFishRuntimeRun, String>,
pub session_id: __sdk::__query_builder::IxCol<BigFishRuntimeRun, String>,
}
impl __sdk::__query_builder::HasIxCols for BigFishRuntimeRun {
type IxCols = BigFishRuntimeRunIxCols;
fn ix_cols(table_name: &'static str) -> Self::IxCols {
BigFishRuntimeRunIxCols {
owner_user_id: __sdk::__query_builder::IxCol::new(table_name, "owner_user_id"),
run_id: __sdk::__query_builder::IxCol::new(table_name, "run_id"),
session_id: __sdk::__query_builder::IxCol::new(table_name, "session_id"),
}
}
}
impl __sdk::__query_builder::CanBeLookupTable for BigFishRuntimeRun {}

View File

@@ -1,31 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::big_fish_run_status_type::BigFishRunStatus;
use super::big_fish_runtime_entity_type::BigFishRuntimeEntity;
use super::big_fish_vector_2_type::BigFishVector2;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishRuntimeSnapshot {
pub run_id: String,
pub session_id: String,
pub status: BigFishRunStatus,
pub tick: u64,
pub player_level: u32,
pub win_level: u32,
pub leader_entity_id: Option<String>,
pub owned_entities: Vec<BigFishRuntimeEntity>,
pub wild_entities: Vec<BigFishRuntimeEntity>,
pub camera_center: BigFishVector2,
pub last_input: BigFishVector2,
pub event_log: Vec<String>,
pub updated_at_micros: i64,
}
impl __sdk::InModule for BigFishRuntimeSnapshot {
type Module = super::RemoteModule;
}

View File

@@ -6,15 +6,12 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
#[derive(Copy, Eq, Hash)]
pub enum BigFishRunStatus {
Running,
Won,
Failed,
pub struct DatabaseMigrationAuthorizeOperatorInput {
pub bootstrap_secret: String,
pub operator_identity_hex: String,
pub note: String,
}
impl __sdk::InModule for BigFishRunStatus {
impl __sdk::InModule for DatabaseMigrationAuthorizeOperatorInput {
type Module = super::RemoteModule;
}

View File

@@ -6,11 +6,10 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishRunGetInput {
pub run_id: String,
pub owner_user_id: String,
pub struct DatabaseMigrationExportInput {
pub include_tables: Vec<String>,
}
impl __sdk::InModule for BigFishRunGetInput {
impl __sdk::InModule for DatabaseMigrationExportInput {
type Module = super::RemoteModule;
}

View File

@@ -0,0 +1,18 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct DatabaseMigrationImportInput {
pub migration_json: String,
pub include_tables: Vec<String>,
pub replace_existing: bool,
pub dry_run: bool,
}
impl __sdk::InModule for DatabaseMigrationImportInput {
type Module = super::RemoteModule;
}

View File

@@ -4,16 +4,14 @@
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::big_fish_runtime_snapshot_type::BigFishRuntimeSnapshot;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishRunProcedureResult {
pub struct DatabaseMigrationOperatorProcedureResult {
pub ok: bool,
pub run: Option<BigFishRuntimeSnapshot>,
pub operator_identity_hex: Option<String>,
pub error_message: Option<String>,
}
impl __sdk::InModule for BigFishRunProcedureResult {
impl __sdk::InModule for DatabaseMigrationOperatorProcedureResult {
type Module = super::RemoteModule;
}

View File

@@ -0,0 +1,59 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct DatabaseMigrationOperator {
pub operator_identity: __sdk::Identity,
pub created_at: __sdk::Timestamp,
pub created_by: __sdk::Identity,
pub note: String,
}
impl __sdk::InModule for DatabaseMigrationOperator {
type Module = super::RemoteModule;
}
/// Column accessor struct for the table `DatabaseMigrationOperator`.
///
/// Provides typed access to columns for query building.
pub struct DatabaseMigrationOperatorCols {
pub operator_identity: __sdk::__query_builder::Col<DatabaseMigrationOperator, __sdk::Identity>,
pub created_at: __sdk::__query_builder::Col<DatabaseMigrationOperator, __sdk::Timestamp>,
pub created_by: __sdk::__query_builder::Col<DatabaseMigrationOperator, __sdk::Identity>,
pub note: __sdk::__query_builder::Col<DatabaseMigrationOperator, String>,
}
impl __sdk::__query_builder::HasCols for DatabaseMigrationOperator {
type Cols = DatabaseMigrationOperatorCols;
fn cols(table_name: &'static str) -> Self::Cols {
DatabaseMigrationOperatorCols {
operator_identity: __sdk::__query_builder::Col::new(table_name, "operator_identity"),
created_at: __sdk::__query_builder::Col::new(table_name, "created_at"),
created_by: __sdk::__query_builder::Col::new(table_name, "created_by"),
note: __sdk::__query_builder::Col::new(table_name, "note"),
}
}
}
/// Indexed column accessor struct for the table `DatabaseMigrationOperator`.
///
/// Provides typed access to indexed columns for query building.
pub struct DatabaseMigrationOperatorIxCols {
pub operator_identity:
__sdk::__query_builder::IxCol<DatabaseMigrationOperator, __sdk::Identity>,
}
impl __sdk::__query_builder::HasIxCols for DatabaseMigrationOperator {
type IxCols = DatabaseMigrationOperatorIxCols;
fn ix_cols(table_name: &'static str) -> Self::IxCols {
DatabaseMigrationOperatorIxCols {
operator_identity: __sdk::__query_builder::IxCol::new(table_name, "operator_identity"),
}
}
}
impl __sdk::__query_builder::CanBeLookupTable for DatabaseMigrationOperator {}

View File

@@ -4,18 +4,18 @@
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::big_fish_vector_2_type::BigFishVector2;
use super::database_migration_table_stat_type::DatabaseMigrationTableStat;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishRuntimeEntity {
pub entity_id: String,
pub level: u32,
pub position: BigFishVector2,
pub radius: f32,
pub offscreen_seconds: f32,
pub struct DatabaseMigrationProcedureResult {
pub ok: bool,
pub schema_version: u32,
pub migration_json: Option<String>,
pub table_stats: Vec<DatabaseMigrationTableStat>,
pub error_message: Option<String>,
}
impl __sdk::InModule for BigFishRuntimeEntity {
impl __sdk::InModule for DatabaseMigrationProcedureResult {
type Module = super::RemoteModule;
}

View File

@@ -6,11 +6,10 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishVector2 {
pub x: f32,
pub y: f32,
pub struct DatabaseMigrationRevokeOperatorInput {
pub operator_identity_hex: String,
}
impl __sdk::InModule for BigFishVector2 {
impl __sdk::InModule for DatabaseMigrationRevokeOperatorInput {
type Module = super::RemoteModule;
}

View File

@@ -6,13 +6,13 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct BigFishRunStartInput {
pub run_id: String,
pub session_id: String,
pub owner_user_id: String,
pub started_at_micros: i64,
pub struct DatabaseMigrationTableStat {
pub table_name: String,
pub exported_row_count: u64,
pub imported_row_count: u64,
pub skipped_row_count: u64,
}
impl __sdk::InModule for BigFishRunStartInput {
impl __sdk::InModule for DatabaseMigrationTableStat {
type Module = super::RemoteModule;
}

View File

@@ -0,0 +1,59 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::database_migration_export_input_type::DatabaseMigrationExportInput;
use super::database_migration_procedure_result_type::DatabaseMigrationProcedureResult;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct ExportDatabaseMigrationToFileArgs {
pub input: DatabaseMigrationExportInput,
}
impl __sdk::InModule for ExportDatabaseMigrationToFileArgs {
type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `export_database_migration_to_file`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait export_database_migration_to_file {
fn export_database_migration_to_file(&self, input: DatabaseMigrationExportInput) {
self.export_database_migration_to_file_then(input, |_, _| {});
}
fn export_database_migration_to_file_then(
&self,
input: DatabaseMigrationExportInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<DatabaseMigrationProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
);
}
impl export_database_migration_to_file for super::RemoteProcedures {
fn export_database_migration_to_file_then(
&self,
input: DatabaseMigrationExportInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<DatabaseMigrationProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
) {
self.imp
.invoke_procedure_with_callback::<_, DatabaseMigrationProcedureResult>(
"export_database_migration_to_file",
ExportDatabaseMigrationToFileArgs { input },
__callback,
);
}
}

View File

@@ -1,59 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::big_fish_run_get_input_type::BigFishRunGetInput;
use super::big_fish_run_procedure_result_type::BigFishRunProcedureResult;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct GetBigFishRunArgs {
pub input: BigFishRunGetInput,
}
impl __sdk::InModule for GetBigFishRunArgs {
type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `get_big_fish_run`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait get_big_fish_run {
fn get_big_fish_run(&self, input: BigFishRunGetInput) {
self.get_big_fish_run_then(input, |_, _| {});
}
fn get_big_fish_run_then(
&self,
input: BigFishRunGetInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<BigFishRunProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
);
}
impl get_big_fish_run for super::RemoteProcedures {
fn get_big_fish_run_then(
&self,
input: BigFishRunGetInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<BigFishRunProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
) {
self.imp
.invoke_procedure_with_callback::<_, BigFishRunProcedureResult>(
"get_big_fish_run",
GetBigFishRunArgs { input },
__callback,
);
}
}

View File

@@ -0,0 +1,59 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::database_migration_import_input_type::DatabaseMigrationImportInput;
use super::database_migration_procedure_result_type::DatabaseMigrationProcedureResult;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct ImportDatabaseMigrationFromFileArgs {
pub input: DatabaseMigrationImportInput,
}
impl __sdk::InModule for ImportDatabaseMigrationFromFileArgs {
type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `import_database_migration_from_file`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait import_database_migration_from_file {
fn import_database_migration_from_file(&self, input: DatabaseMigrationImportInput) {
self.import_database_migration_from_file_then(input, |_, _| {});
}
fn import_database_migration_from_file_then(
&self,
input: DatabaseMigrationImportInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<DatabaseMigrationProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
);
}
impl import_database_migration_from_file for super::RemoteProcedures {
fn import_database_migration_from_file_then(
&self,
input: DatabaseMigrationImportInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<DatabaseMigrationProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
) {
self.imp
.invoke_procedure_with_callback::<_, DatabaseMigrationProcedureResult>(
"import_database_migration_from_file",
ImportDatabaseMigrationFromFileArgs { input },
__callback,
);
}
}

View File

@@ -0,0 +1,59 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};

use super::database_migration_import_input_type::DatabaseMigrationImportInput;
use super::database_migration_procedure_result_type::DatabaseMigrationProcedureResult;

#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct ImportDatabaseMigrationIncrementalFromFileArgs {
    pub input: DatabaseMigrationImportInput,
}
impl __sdk::InModule for ImportDatabaseMigrationIncrementalFromFileArgs {
    type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `import_database_migration_incremental_from_file`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait import_database_migration_incremental_from_file {
    /// Invoke the procedure and ignore its result (fire-and-forget wrapper
    /// around [`Self::import_database_migration_incremental_from_file_then`]).
    fn import_database_migration_incremental_from_file(&self, input: DatabaseMigrationImportInput) {
        self.import_database_migration_incremental_from_file_then(input, |_, _| {});
    }
    /// Invoke the procedure, running `__callback` once with either the
    /// procedure's `DatabaseMigrationProcedureResult` or an SDK-internal error.
    fn import_database_migration_incremental_from_file_then(
        &self,
        input: DatabaseMigrationImportInput,
        __callback: impl FnOnce(
            &super::ProcedureEventContext,
            Result<DatabaseMigrationProcedureResult, __sdk::InternalError>,
        ) + Send
        + 'static,
    );
}
impl import_database_migration_incremental_from_file for super::RemoteProcedures {
    fn import_database_migration_incremental_from_file_then(
        &self,
        input: DatabaseMigrationImportInput,
        __callback: impl FnOnce(
            &super::ProcedureEventContext,
            Result<DatabaseMigrationProcedureResult, __sdk::InternalError>,
        ) + Send
        + 'static,
    ) {
        // Wrap the input in the generated args struct and dispatch the call
        // over the SDK connection; the callback fires on completion.
        self.imp
            .invoke_procedure_with_callback::<_, DatabaseMigrationProcedureResult>(
                "import_database_migration_incremental_from_file",
                ImportDatabaseMigrationIncrementalFromFileArgs { input },
                __callback,
            );
    }
}

View File

@@ -58,6 +58,7 @@ pub mod auth_store_snapshot_procedure_result_type;
pub mod auth_store_snapshot_record_type;
pub mod auth_store_snapshot_type;
pub mod auth_store_snapshot_upsert_input_type;
pub mod authorize_database_migration_operator_procedure;
pub mod battle_mode_type;
pub mod battle_state_input_type;
pub mod battle_state_procedure_result_type;
@@ -89,20 +90,11 @@ pub mod big_fish_level_blueprint_type;
pub mod big_fish_message_finalize_input_type;
pub mod big_fish_message_submit_input_type;
pub mod big_fish_publish_input_type;
pub mod big_fish_run_get_input_type;
pub mod big_fish_run_input_submit_input_type;
pub mod big_fish_run_procedure_result_type;
pub mod big_fish_run_start_input_type;
pub mod big_fish_run_status_type;
pub mod big_fish_runtime_entity_type;
pub mod big_fish_runtime_params_type;
pub mod big_fish_runtime_run_type;
pub mod big_fish_runtime_snapshot_type;
pub mod big_fish_session_create_input_type;
pub mod big_fish_session_get_input_type;
pub mod big_fish_session_procedure_result_type;
pub mod big_fish_session_snapshot_type;
pub mod big_fish_vector_2_type;
pub mod big_fish_work_delete_input_type;
pub mod big_fish_works_list_input_type;
pub mod big_fish_works_procedure_result_type;
@@ -188,6 +180,14 @@ pub mod custom_world_theme_mode_type;
pub mod custom_world_work_summary_snapshot_type;
pub mod custom_world_works_list_input_type;
pub mod custom_world_works_list_result_type;
pub mod database_migration_authorize_operator_input_type;
pub mod database_migration_export_input_type;
pub mod database_migration_import_input_type;
pub mod database_migration_operator_procedure_result_type;
pub mod database_migration_operator_type;
pub mod database_migration_procedure_result_type;
pub mod database_migration_revoke_operator_input_type;
pub mod database_migration_table_stat_type;
pub mod delete_big_fish_work_procedure;
pub mod delete_custom_world_agent_session_procedure;
pub mod delete_custom_world_profile_and_return_procedure;
@@ -197,6 +197,7 @@ pub mod drag_puzzle_piece_or_group_procedure;
pub mod equip_inventory_item_input_type;
pub mod execute_custom_world_agent_action_procedure;
pub mod export_auth_store_snapshot_from_tables_procedure;
pub mod export_database_migration_to_file_procedure;
pub mod fail_ai_task_and_return_procedure;
pub mod finalize_big_fish_agent_message_turn_procedure;
pub mod finalize_custom_world_agent_message_turn_procedure;
@@ -204,7 +205,6 @@ pub mod finalize_puzzle_agent_message_turn_procedure;
pub mod generate_big_fish_asset_procedure;
pub mod get_auth_store_snapshot_procedure;
pub mod get_battle_state_procedure;
pub mod get_big_fish_run_procedure;
pub mod get_big_fish_session_procedure;
pub mod get_chapter_progression_procedure;
pub mod get_custom_world_agent_card_detail_procedure;
@@ -230,6 +230,8 @@ pub mod grant_inventory_item_input_type;
pub mod grant_player_progression_experience_and_return_procedure;
pub mod grant_player_progression_experience_reducer;
pub mod import_auth_store_snapshot_procedure;
pub mod import_database_migration_from_file_procedure;
pub mod import_database_migration_incremental_from_file_procedure;
pub mod inventory_container_kind_type;
pub mod inventory_equipment_slot_type;
pub mod inventory_item_rarity_type;
@@ -358,6 +360,7 @@ pub mod resolve_npc_social_action_reducer;
pub mod resolve_treasure_interaction_and_return_procedure;
pub mod resolve_treasure_interaction_reducer;
pub mod resume_profile_save_archive_and_return_procedure;
pub mod revoke_database_migration_operator_procedure;
pub mod rpg_agent_draft_card_kind_type;
pub mod rpg_agent_draft_card_status_type;
pub mod rpg_agent_message_kind_type;
@@ -427,7 +430,6 @@ pub mod save_puzzle_generated_images_procedure;
pub mod select_puzzle_cover_image_procedure;
pub mod start_ai_task_reducer;
pub mod start_ai_task_stage_reducer;
pub mod start_big_fish_run_procedure;
pub mod start_puzzle_run_procedure;
pub mod story_continue_input_type;
pub mod story_event_kind_type;
@@ -440,7 +442,6 @@ pub mod story_session_state_input_type;
pub mod story_session_state_procedure_result_type;
pub mod story_session_status_type;
pub mod story_session_type;
pub mod submit_big_fish_input_procedure;
pub mod submit_big_fish_message_procedure;
pub mod submit_custom_world_agent_message_procedure;
pub mod submit_puzzle_agent_message_procedure;
@@ -522,6 +523,7 @@ pub use auth_store_snapshot_procedure_result_type::AuthStoreSnapshotProcedureRes
pub use auth_store_snapshot_record_type::AuthStoreSnapshotRecord;
pub use auth_store_snapshot_type::AuthStoreSnapshot;
pub use auth_store_snapshot_upsert_input_type::AuthStoreSnapshotUpsertInput;
pub use authorize_database_migration_operator_procedure::authorize_database_migration_operator;
pub use battle_mode_type::BattleMode;
pub use battle_state_input_type::BattleStateInput;
pub use battle_state_procedure_result_type::BattleStateProcedureResult;
@@ -553,20 +555,11 @@ pub use big_fish_level_blueprint_type::BigFishLevelBlueprint;
pub use big_fish_message_finalize_input_type::BigFishMessageFinalizeInput;
pub use big_fish_message_submit_input_type::BigFishMessageSubmitInput;
pub use big_fish_publish_input_type::BigFishPublishInput;
pub use big_fish_run_get_input_type::BigFishRunGetInput;
pub use big_fish_run_input_submit_input_type::BigFishRunInputSubmitInput;
pub use big_fish_run_procedure_result_type::BigFishRunProcedureResult;
pub use big_fish_run_start_input_type::BigFishRunStartInput;
pub use big_fish_run_status_type::BigFishRunStatus;
pub use big_fish_runtime_entity_type::BigFishRuntimeEntity;
pub use big_fish_runtime_params_type::BigFishRuntimeParams;
pub use big_fish_runtime_run_type::BigFishRuntimeRun;
pub use big_fish_runtime_snapshot_type::BigFishRuntimeSnapshot;
pub use big_fish_session_create_input_type::BigFishSessionCreateInput;
pub use big_fish_session_get_input_type::BigFishSessionGetInput;
pub use big_fish_session_procedure_result_type::BigFishSessionProcedureResult;
pub use big_fish_session_snapshot_type::BigFishSessionSnapshot;
pub use big_fish_vector_2_type::BigFishVector2;
pub use big_fish_work_delete_input_type::BigFishWorkDeleteInput;
pub use big_fish_works_list_input_type::BigFishWorksListInput;
pub use big_fish_works_procedure_result_type::BigFishWorksProcedureResult;
@@ -652,6 +645,14 @@ pub use custom_world_theme_mode_type::CustomWorldThemeMode;
pub use custom_world_work_summary_snapshot_type::CustomWorldWorkSummarySnapshot;
pub use custom_world_works_list_input_type::CustomWorldWorksListInput;
pub use custom_world_works_list_result_type::CustomWorldWorksListResult;
pub use database_migration_authorize_operator_input_type::DatabaseMigrationAuthorizeOperatorInput;
pub use database_migration_export_input_type::DatabaseMigrationExportInput;
pub use database_migration_import_input_type::DatabaseMigrationImportInput;
pub use database_migration_operator_procedure_result_type::DatabaseMigrationOperatorProcedureResult;
pub use database_migration_operator_type::DatabaseMigrationOperator;
pub use database_migration_procedure_result_type::DatabaseMigrationProcedureResult;
pub use database_migration_revoke_operator_input_type::DatabaseMigrationRevokeOperatorInput;
pub use database_migration_table_stat_type::DatabaseMigrationTableStat;
pub use delete_big_fish_work_procedure::delete_big_fish_work;
pub use delete_custom_world_agent_session_procedure::delete_custom_world_agent_session;
pub use delete_custom_world_profile_and_return_procedure::delete_custom_world_profile_and_return;
@@ -661,6 +662,7 @@ pub use drag_puzzle_piece_or_group_procedure::drag_puzzle_piece_or_group;
pub use equip_inventory_item_input_type::EquipInventoryItemInput;
pub use execute_custom_world_agent_action_procedure::execute_custom_world_agent_action;
pub use export_auth_store_snapshot_from_tables_procedure::export_auth_store_snapshot_from_tables;
pub use export_database_migration_to_file_procedure::export_database_migration_to_file;
pub use fail_ai_task_and_return_procedure::fail_ai_task_and_return;
pub use finalize_big_fish_agent_message_turn_procedure::finalize_big_fish_agent_message_turn;
pub use finalize_custom_world_agent_message_turn_procedure::finalize_custom_world_agent_message_turn;
@@ -668,7 +670,6 @@ pub use finalize_puzzle_agent_message_turn_procedure::finalize_puzzle_agent_mess
pub use generate_big_fish_asset_procedure::generate_big_fish_asset;
pub use get_auth_store_snapshot_procedure::get_auth_store_snapshot;
pub use get_battle_state_procedure::get_battle_state;
pub use get_big_fish_run_procedure::get_big_fish_run;
pub use get_big_fish_session_procedure::get_big_fish_session;
pub use get_chapter_progression_procedure::get_chapter_progression;
pub use get_custom_world_agent_card_detail_procedure::get_custom_world_agent_card_detail;
@@ -694,6 +695,8 @@ pub use grant_inventory_item_input_type::GrantInventoryItemInput;
pub use grant_player_progression_experience_and_return_procedure::grant_player_progression_experience_and_return;
pub use grant_player_progression_experience_reducer::grant_player_progression_experience;
pub use import_auth_store_snapshot_procedure::import_auth_store_snapshot;
pub use import_database_migration_from_file_procedure::import_database_migration_from_file;
pub use import_database_migration_incremental_from_file_procedure::import_database_migration_incremental_from_file;
pub use inventory_container_kind_type::InventoryContainerKind;
pub use inventory_equipment_slot_type::InventoryEquipmentSlot;
pub use inventory_item_rarity_type::InventoryItemRarity;
@@ -822,6 +825,7 @@ pub use resolve_npc_social_action_reducer::resolve_npc_social_action;
pub use resolve_treasure_interaction_and_return_procedure::resolve_treasure_interaction_and_return;
pub use resolve_treasure_interaction_reducer::resolve_treasure_interaction;
pub use resume_profile_save_archive_and_return_procedure::resume_profile_save_archive_and_return;
pub use revoke_database_migration_operator_procedure::revoke_database_migration_operator;
pub use rpg_agent_draft_card_kind_type::RpgAgentDraftCardKind;
pub use rpg_agent_draft_card_status_type::RpgAgentDraftCardStatus;
pub use rpg_agent_message_kind_type::RpgAgentMessageKind;
@@ -891,7 +895,6 @@ pub use save_puzzle_generated_images_procedure::save_puzzle_generated_images;
pub use select_puzzle_cover_image_procedure::select_puzzle_cover_image;
pub use start_ai_task_reducer::start_ai_task;
pub use start_ai_task_stage_reducer::start_ai_task_stage;
pub use start_big_fish_run_procedure::start_big_fish_run;
pub use start_puzzle_run_procedure::start_puzzle_run;
pub use story_continue_input_type::StoryContinueInput;
pub use story_event_kind_type::StoryEventKind;
@@ -904,7 +907,6 @@ pub use story_session_state_input_type::StorySessionStateInput;
pub use story_session_state_procedure_result_type::StorySessionStateProcedureResult;
pub use story_session_status_type::StorySessionStatus;
pub use story_session_type::StorySession;
pub use submit_big_fish_input_procedure::submit_big_fish_input;
pub use submit_big_fish_message_procedure::submit_big_fish_message;
pub use submit_custom_world_agent_message_procedure::submit_custom_world_agent_message;
pub use submit_puzzle_agent_message_procedure::submit_puzzle_agent_message;

View File

@@ -0,0 +1,59 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};

use super::database_migration_operator_procedure_result_type::DatabaseMigrationOperatorProcedureResult;
use super::database_migration_revoke_operator_input_type::DatabaseMigrationRevokeOperatorInput;

#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct RevokeDatabaseMigrationOperatorArgs {
    pub input: DatabaseMigrationRevokeOperatorInput,
}
impl __sdk::InModule for RevokeDatabaseMigrationOperatorArgs {
    type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `revoke_database_migration_operator`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait revoke_database_migration_operator {
    /// Invoke the procedure and ignore its result (fire-and-forget wrapper
    /// around [`Self::revoke_database_migration_operator_then`]).
    fn revoke_database_migration_operator(&self, input: DatabaseMigrationRevokeOperatorInput) {
        self.revoke_database_migration_operator_then(input, |_, _| {});
    }
    /// Invoke the procedure, running `__callback` once with either the
    /// procedure's `DatabaseMigrationOperatorProcedureResult` or an SDK-internal error.
    fn revoke_database_migration_operator_then(
        &self,
        input: DatabaseMigrationRevokeOperatorInput,
        __callback: impl FnOnce(
            &super::ProcedureEventContext,
            Result<DatabaseMigrationOperatorProcedureResult, __sdk::InternalError>,
        ) + Send
        + 'static,
    );
}
impl revoke_database_migration_operator for super::RemoteProcedures {
    fn revoke_database_migration_operator_then(
        &self,
        input: DatabaseMigrationRevokeOperatorInput,
        __callback: impl FnOnce(
            &super::ProcedureEventContext,
            Result<DatabaseMigrationOperatorProcedureResult, __sdk::InternalError>,
        ) + Send
        + 'static,
    ) {
        // Wrap the input in the generated args struct and dispatch the call
        // over the SDK connection; the callback fires on completion.
        self.imp
            .invoke_procedure_with_callback::<_, DatabaseMigrationOperatorProcedureResult>(
                "revoke_database_migration_operator",
                RevokeDatabaseMigrationOperatorArgs { input },
                __callback,
            );
    }
}

View File

@@ -1,59 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::big_fish_run_procedure_result_type::BigFishRunProcedureResult;
use super::big_fish_run_start_input_type::BigFishRunStartInput;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct StartBigFishRunArgs {
pub input: BigFishRunStartInput,
}
impl __sdk::InModule for StartBigFishRunArgs {
type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `start_big_fish_run`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait start_big_fish_run {
fn start_big_fish_run(&self, input: BigFishRunStartInput) {
self.start_big_fish_run_then(input, |_, _| {});
}
fn start_big_fish_run_then(
&self,
input: BigFishRunStartInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<BigFishRunProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
);
}
impl start_big_fish_run for super::RemoteProcedures {
fn start_big_fish_run_then(
&self,
input: BigFishRunStartInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<BigFishRunProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
) {
self.imp
.invoke_procedure_with_callback::<_, BigFishRunProcedureResult>(
"start_big_fish_run",
StartBigFishRunArgs { input },
__callback,
);
}
}

View File

@@ -1,59 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
use super::big_fish_run_input_submit_input_type::BigFishRunInputSubmitInput;
use super::big_fish_run_procedure_result_type::BigFishRunProcedureResult;
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
struct SubmitBigFishInputArgs {
pub input: BigFishRunInputSubmitInput,
}
impl __sdk::InModule for SubmitBigFishInputArgs {
type Module = super::RemoteModule;
}
#[allow(non_camel_case_types)]
/// Extension trait for access to the procedure `submit_big_fish_input`.
///
/// Implemented for [`super::RemoteProcedures`].
pub trait submit_big_fish_input {
fn submit_big_fish_input(&self, input: BigFishRunInputSubmitInput) {
self.submit_big_fish_input_then(input, |_, _| {});
}
fn submit_big_fish_input_then(
&self,
input: BigFishRunInputSubmitInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<BigFishRunProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
);
}
impl submit_big_fish_input for super::RemoteProcedures {
fn submit_big_fish_input_then(
&self,
input: BigFishRunInputSubmitInput,
__callback: impl FnOnce(
&super::ProcedureEventContext,
Result<BigFishRunProcedureResult, __sdk::InternalError>,
) + Send
+ 'static,
) {
self.imp
.invoke_procedure_with_callback::<_, BigFishRunProcedureResult>(
"submit_big_fish_input",
SubmitBigFishInputArgs { input },
__callback,
);
}
}

View File

@@ -11,6 +11,7 @@ crate-type = ["cdylib"]
log = { workspace = true }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
spacetimedb-lib = { version = "=2.1.0", default-features = false, features = ["serde"] }
module-ai = { path = "../module-ai", default-features = false, features = ["spacetime-types"] }
module-assets = { path = "../module-assets", default-features = false, features = ["spacetime-types"] }
module-big-fish = { path = "../module-big-fish", default-features = false, features = ["spacetime-types"] }

View File

@@ -1,9 +1,7 @@
mod assets;
mod runtime;
mod session;
mod tables;
pub use assets::*;
pub use runtime::*;
pub use session::*;
pub use tables::*;

View File

@@ -1,198 +0,0 @@
use crate::big_fish::tables::{big_fish_creation_session, big_fish_runtime_run};
use crate::*;
#[spacetimedb::procedure]
pub fn start_big_fish_run(
ctx: &mut ProcedureContext,
input: BigFishRunStartInput,
) -> BigFishRunProcedureResult {
match ctx.try_with_tx(|tx| start_big_fish_run_tx(tx, input.clone())) {
Ok(run) => BigFishRunProcedureResult {
ok: true,
run: Some(run),
error_message: None,
},
Err(message) => BigFishRunProcedureResult {
ok: false,
run: None,
error_message: Some(message),
},
}
}
#[spacetimedb::procedure]
pub fn submit_big_fish_input(
ctx: &mut ProcedureContext,
input: BigFishRunInputSubmitInput,
) -> BigFishRunProcedureResult {
match ctx.try_with_tx(|tx| submit_big_fish_input_tx(tx, input.clone())) {
Ok(run) => BigFishRunProcedureResult {
ok: true,
run: Some(run),
error_message: None,
},
Err(message) => BigFishRunProcedureResult {
ok: false,
run: None,
error_message: Some(message),
},
}
}
#[spacetimedb::procedure]
pub fn get_big_fish_run(
ctx: &mut ProcedureContext,
input: BigFishRunGetInput,
) -> BigFishRunProcedureResult {
match ctx.try_with_tx(|tx| get_big_fish_run_tx(tx, input.clone())) {
Ok(run) => BigFishRunProcedureResult {
ok: true,
run: Some(run),
error_message: None,
},
Err(message) => BigFishRunProcedureResult {
ok: false,
run: None,
error_message: Some(message),
},
}
}
fn start_big_fish_run_tx(
ctx: &ReducerContext,
input: BigFishRunStartInput,
) -> Result<BigFishRuntimeSnapshot, String> {
validate_run_start_input(&input).map_err(|error| error.to_string())?;
if ctx
.db
.big_fish_runtime_run()
.run_id()
.find(&input.run_id)
.is_some()
{
return Err("big_fish_runtime_run.run_id 已存在".to_string());
}
let session = ctx
.db
.big_fish_creation_session()
.session_id()
.find(&input.session_id)
.ok_or_else(|| "big_fish_creation_session 不存在".to_string())?;
if session.owner_user_id != input.owner_user_id
&& session.stage != BigFishCreationStage::Published
{
return Err("big_fish_creation_session 不存在".to_string());
}
let draft = session
.draft_json
.as_deref()
.ok_or_else(|| "big_fish.draft 尚未编译".to_string())
.and_then(|value| deserialize_draft(value).map_err(|error| error.to_string()))?;
let snapshot = build_initial_runtime_snapshot(
input.run_id.clone(),
input.session_id.clone(),
&draft,
input.started_at_micros,
);
let now = Timestamp::from_micros_since_unix_epoch(input.started_at_micros);
ctx.db.big_fish_runtime_run().insert(BigFishRuntimeRun {
run_id: input.run_id,
session_id: input.session_id,
owner_user_id: input.owner_user_id,
status: snapshot.status,
snapshot_json: serialize_runtime_snapshot(&snapshot).map_err(|error| error.to_string())?,
last_input_x: 0.0,
last_input_y: 0.0,
tick: snapshot.tick,
created_at: now,
updated_at: now,
});
Ok(snapshot)
}
fn submit_big_fish_input_tx(
ctx: &ReducerContext,
input: BigFishRunInputSubmitInput,
) -> Result<BigFishRuntimeSnapshot, String> {
validate_run_input_submit_input(&input).map_err(|error| error.to_string())?;
let run = ctx
.db
.big_fish_runtime_run()
.run_id()
.find(&input.run_id)
.filter(|row| row.owner_user_id == input.owner_user_id)
.ok_or_else(|| "big_fish_runtime_run 不存在".to_string())?;
let session = ctx
.db
.big_fish_creation_session()
.session_id()
.find(&run.session_id)
.ok_or_else(|| "big_fish_creation_session 不存在".to_string())?;
if session.owner_user_id != input.owner_user_id
&& session.stage != BigFishCreationStage::Published
{
return Err("big_fish_creation_session 不存在".to_string());
}
let draft = session
.draft_json
.as_deref()
.ok_or_else(|| "big_fish.draft 尚未编译".to_string())
.and_then(|value| deserialize_draft(value).map_err(|error| error.to_string()))?;
let current_snapshot =
deserialize_runtime_snapshot(&run.snapshot_json).map_err(|error| error.to_string())?;
let next_snapshot = advance_runtime_snapshot(
current_snapshot,
&draft.runtime_params,
input.input_x,
input.input_y,
input.submitted_at_micros,
);
replace_big_fish_run(
ctx,
&run,
BigFishRuntimeRun {
run_id: run.run_id.clone(),
session_id: run.session_id.clone(),
owner_user_id: run.owner_user_id.clone(),
status: next_snapshot.status,
snapshot_json: serialize_runtime_snapshot(&next_snapshot)
.map_err(|error| error.to_string())?,
last_input_x: input.input_x,
last_input_y: input.input_y,
tick: next_snapshot.tick,
created_at: run.created_at,
updated_at: Timestamp::from_micros_since_unix_epoch(input.submitted_at_micros),
},
);
Ok(next_snapshot)
}
fn get_big_fish_run_tx(
ctx: &ReducerContext,
input: BigFishRunGetInput,
) -> Result<BigFishRuntimeSnapshot, String> {
validate_run_get_input(&input).map_err(|error| error.to_string())?;
let run = ctx
.db
.big_fish_runtime_run()
.run_id()
.find(&input.run_id)
.filter(|row| row.owner_user_id == input.owner_user_id)
.ok_or_else(|| "big_fish_runtime_run 不存在".to_string())?;
deserialize_runtime_snapshot(&run.snapshot_json).map_err(|error| error.to_string())
}
fn replace_big_fish_run(
ctx: &ReducerContext,
current: &BigFishRuntimeRun,
next: BigFishRuntimeRun,
) {
ctx.db
.big_fish_runtime_run()
.run_id()
.delete(&current.run_id);
ctx.db.big_fish_runtime_run().insert(next);
}

View File

@@ -1,6 +1,8 @@
use crate::big_fish::tables::{big_fish_agent_message, big_fish_creation_session};
use crate::*;
const INITIAL_BIG_FISH_CREATION_PROGRESS_PERCENT: u32 = 0;
#[spacetimedb::procedure]
pub fn create_big_fish_session(
ctx: &mut ProcedureContext,
@@ -182,7 +184,8 @@ pub(crate) fn create_big_fish_session_tx(
owner_user_id: input.owner_user_id.clone(),
seed_text: input.seed_text.trim().to_string(),
current_turn: 0,
progress_percent: 20,
// 中文注释:欢迎语和初始锚点只建立工作台上下文,不能提前抬高创作进度。
progress_percent: INITIAL_BIG_FISH_CREATION_PROGRESS_PERCENT,
stage: BigFishCreationStage::CollectingAnchors,
anchor_pack_json: serialize_anchor_pack(&anchor_pack)
.map_err(|error| error.to_string())?,
@@ -292,7 +295,7 @@ pub(crate) fn delete_big_fish_work_tx(
.filter(|row| row.owner_user_id == input.owner_user_id)
.ok_or_else(|| "big_fish_creation_session 不存在".to_string())?;
// 删除作品时同步清理 Agent 消息素材槽与运行快照,避免创作页消失后残留孤儿数据
// 删除作品时同步清理 Agent 消息素材槽;最终游玩模拟已经迁到前端,不再写后端运行快照
ctx.db
.big_fish_creation_session()
.session_id()
@@ -318,18 +321,6 @@ pub(crate) fn delete_big_fish_work_tx(
{
ctx.db.big_fish_asset_slot().slot_id().delete(&slot.slot_id);
}
for run in ctx
.db
.big_fish_runtime_run()
.iter()
.filter(|row| {
row.session_id == input.session_id && row.owner_user_id == input.owner_user_id
})
.collect::<Vec<_>>()
{
ctx.db.big_fish_runtime_run().run_id().delete(&run.run_id);
}
list_big_fish_works_tx(
ctx,
BigFishWorksListInput {
@@ -707,6 +698,11 @@ mod tests {
}
}
#[test]
fn initial_big_fish_creation_progress_starts_from_zero() {
assert_eq!(INITIAL_BIG_FISH_CREATION_PROGRESS_PERCENT, 0);
}
#[test]
fn big_fish_direct_work_content_ignores_empty_created_session() {
let empty_session =

View File

@@ -51,22 +51,3 @@ pub struct BigFishAssetSlot {
pub(crate) prompt_snapshot: String,
pub(crate) updated_at: Timestamp,
}
#[spacetimedb::table(
accessor = big_fish_runtime_run,
index(accessor = by_big_fish_run_owner_user_id, btree(columns = [owner_user_id])),
index(accessor = by_big_fish_run_session_id, btree(columns = [session_id]))
)]
pub struct BigFishRuntimeRun {
#[primary_key]
pub(crate) run_id: String,
pub(crate) session_id: String,
pub(crate) owner_user_id: String,
pub(crate) status: BigFishRunStatus,
pub(crate) snapshot_json: String,
pub(crate) last_input_x: f32,
pub(crate) last_input_y: f32,
pub(crate) tick: u64,
pub(crate) created_at: Timestamp,
pub(crate) updated_at: Timestamp,
}

View File

@@ -20,7 +20,9 @@ use module_quest::{
};
pub(crate) use serde_json::{Map as JsonMap, Value as JsonValue, json};
pub(crate) use shared_kernel::format_timestamp_micros;
pub use spacetimedb::{ProcedureContext, ReducerContext, SpacetimeType, Table, Timestamp};
pub use spacetimedb::{
Identity, ProcedureContext, ReducerContext, SpacetimeType, Table, Timestamp,
};
use std::collections::HashSet;
mod ai;
@@ -29,6 +31,7 @@ mod auth;
mod big_fish;
mod domain_types;
mod entry;
mod migration;
mod puzzle;
mod runtime;
@@ -38,6 +41,7 @@ pub use auth::*;
pub use big_fish::*;
pub use domain_types::*;
pub use entry::*;
pub use migration::*;
pub use runtime::*;
#[spacetimedb::table(accessor = player_progression)]

View File

@@ -0,0 +1,713 @@
use crate::*;
use serde::{Deserialize, Serialize};
use spacetimedb_lib::sats::de::serde::DeserializeWrapper;
use spacetimedb_lib::sats::ser::serde::SerializeWrapper;
use std::collections::HashSet;
use crate::puzzle::{
puzzle_agent_message, puzzle_agent_session, puzzle_runtime_run, puzzle_work_profile,
};
/// Version stamp embedded in every export and echoed in every procedure result.
const MIGRATION_SCHEMA_VERSION: u32 = 1;
/// Upper bound on a table name in migration inputs — assumed to guard
/// `include_tables` validation elsewhere in this module (validator not visible here).
const MIGRATION_MAX_TABLE_NAME_LEN: usize = 96;
/// Upper bound on the free-text note attached to an authorized operator —
/// presumably enforced by the authorize path (validator not visible here).
const MIGRATION_MAX_OPERATOR_NOTE_CHARS: usize = 160;
/// Minimum accepted length for the bootstrap secret (validator not visible here).
const MIGRATION_MIN_BOOTSTRAP_SECRET_LEN: usize = 16;
/// Bootstrap secret baked in at compile time from the environment; `None` when
/// `GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET` was unset at build time.
const MIGRATION_BOOTSTRAP_SECRET: Option<&str> =
    option_env!("GENARRATIVE_SPACETIME_MIGRATION_BOOTSTRAP_SECRET");
/// Identities allowed to run migration export/import procedures, kept in a
/// dedicated table so private-table export rights are granted explicitly.
#[spacetimedb::table(accessor = database_migration_operator)]
pub struct DatabaseMigrationOperator {
    /// The authorized identity; one row per operator.
    #[primary_key]
    pub operator_identity: Identity,
    /// When the authorization row was created.
    pub created_at: Timestamp,
    /// Identity that granted the authorization.
    pub created_by: Identity,
    /// Free-text audit note supplied at authorization time.
    pub note: String,
}
/// Input for `export_database_migration_to_file`.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct DatabaseMigrationExportInput {
    /// Table names to export; semantics of an empty list are decided by
    /// `should_include_table` (not visible here) — presumably "all tables".
    pub include_tables: Vec<String>,
}

/// Input for the import procedures; the caller (Node side) reads the file and
/// passes its contents as a JSON string.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct DatabaseMigrationImportInput {
    /// Serialized `MigrationFile` payload to import.
    pub migration_json: String,
    /// Table names to import; filtering behavior mirrors the export side.
    pub include_tables: Vec<String>,
    /// Whether existing rows may be replaced — TODO confirm exact semantics in
    /// the import inner function (not visible in this chunk).
    pub replace_existing: bool,
    /// When true, validate without persisting (per the field name; inner
    /// function not visible here to confirm).
    pub dry_run: bool,
}

/// Internal selector distinguishing the two import procedures.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum DatabaseMigrationImportMode {
    /// Used by `import_database_migration_from_file`.
    Strict,
    /// Used by `import_database_migration_incremental_from_file`: only inserts
    /// rows missing from the target; conflicting rows are skipped, not updated.
    Incremental,
}
/// Input for `authorize_database_migration_operator`.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct DatabaseMigrationAuthorizeOperatorInput {
    /// Shared secret compared against the compile-time bootstrap secret.
    pub bootstrap_secret: String,
    /// Hex encoding of the identity to authorize.
    pub operator_identity_hex: String,
    /// Audit note stored on the operator row.
    pub note: String,
}

/// Input for `revoke_database_migration_operator`.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct DatabaseMigrationRevokeOperatorInput {
    /// Hex encoding of the identity whose authorization is removed.
    pub operator_identity_hex: String,
}

/// Per-table counters reported by export/import procedures.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct DatabaseMigrationTableStat {
    pub table_name: String,
    pub exported_row_count: u64,
    pub imported_row_count: u64,
    pub skipped_row_count: u64,
}

/// Result envelope for export/import procedures.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct DatabaseMigrationProcedureResult {
    /// True when the inner operation succeeded.
    pub ok: bool,
    /// Always `MIGRATION_SCHEMA_VERSION`, even on failure.
    pub schema_version: u32,
    /// Export payload; `None` for imports and on any failure.
    pub migration_json: Option<String>,
    /// Per-table stats; empty on failure.
    pub table_stats: Vec<DatabaseMigrationTableStat>,
    /// Human-readable failure reason; `None` on success.
    pub error_message: Option<String>,
}

/// Result envelope for the operator authorize/revoke procedures.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct DatabaseMigrationOperatorProcedureResult {
    /// True when the inner operation succeeded.
    pub ok: bool,
    /// Hex identity the operation applied to; `None` on failure.
    pub operator_identity_hex: Option<String>,
    /// Human-readable failure reason; `None` on success.
    pub error_message: Option<String>,
}
/// On-disk shape of an exported migration (the `migration_json` payload).
#[derive(Serialize, Deserialize)]
struct MigrationFile {
    /// Must match `MIGRATION_SCHEMA_VERSION` on import — TODO confirm in the
    /// import inner function (not visible in this chunk).
    schema_version: u32,
    /// Export wall-clock time in microseconds since the Unix epoch.
    exported_at_micros: i64,
    /// One entry per exported table, in `migration_tables!` order.
    tables: Vec<MigrationTable>,
}

/// A single table's exported rows.
#[derive(Serialize, Deserialize)]
struct MigrationTable {
    /// Table accessor name, e.g. `user_account`.
    name: String,
    /// Rows serialized via `row_to_json` (defined elsewhere in this module).
    rows: Vec<serde_json::Value>,
}
// Single source of truth for every table covered by migration. Callers pass a
// callback macro (plus leading arguments); this macro re-invokes it with the
// full, fixed table list appended, so collect/clear below can never drift out
// of sync with each other. Add new tables here and both paths pick them up.
macro_rules! migration_tables {
    ($macro_name:ident $(, $arg:expr)* $(,)?) => {
        $macro_name! {
            $($arg,)*
            auth_store_snapshot,
            user_account,
            auth_identity,
            refresh_session,
            ai_task,
            ai_task_stage,
            ai_text_chunk,
            ai_result_reference,
            runtime_snapshot,
            runtime_setting,
            user_browse_history,
            profile_dashboard_state,
            profile_wallet_ledger,
            profile_invite_code,
            profile_referral_relation,
            profile_played_world,
            profile_membership,
            profile_recharge_order,
            profile_save_archive,
            player_progression,
            chapter_progression,
            npc_state,
            story_session,
            story_event,
            inventory_slot,
            battle_state,
            treasure_record,
            quest_record,
            quest_log,
            custom_world_profile,
            custom_world_session,
            custom_world_agent_session,
            custom_world_agent_message,
            custom_world_agent_operation,
            custom_world_draft_card,
            custom_world_gallery_entry,
            asset_object,
            asset_entity_binding,
            puzzle_agent_session,
            puzzle_agent_message,
            puzzle_work_profile,
            puzzle_runtime_run,
            big_fish_creation_session,
            big_fish_agent_message,
            big_fish_asset_slot
        }
    };
}
// Expands `collect_migration_table` over every table in `migration_tables!`,
// appending each included table's rows (as JSON) to `$tables`.
macro_rules! collect_all_migration_tables {
    ($ctx:expr, $include_tables:expr, $tables:expr) => {
        migration_tables!(collect_migration_table, $ctx, $include_tables, $tables);
    };
}
// Per-table export step: when `should_include_table` (defined elsewhere in
// this module) accepts the name, serialize every row with `row_to_json` and
// push a `MigrationTable`. The `?`-bearing `collect` means the enclosing
// function must return a compatible `Result`.
macro_rules! collect_migration_table {
    ($ctx:expr, $include_tables:expr, $tables:expr, $($table:ident),+ $(,)?) => {
        $(
            if should_include_table($include_tables, stringify!($table)) {
                let rows = $ctx
                    .db
                    .$table()
                    .iter()
                    .map(|row| row_to_json(&row))
                    .collect::<Result<Vec<_>, _>>()?;
                $tables.push(MigrationTable {
                    name: stringify!($table).to_string(),
                    rows,
                });
            }
        )+
    };
}
// Expands `clear_migration_table` over every table in `migration_tables!`.
macro_rules! clear_all_migration_tables {
    ($ctx:expr, $include_tables:expr) => {
        migration_tables!(clear_migration_table, $ctx, $include_tables);
    };
}
// Per-table wipe step: deletes every row of each included table. Rows are
// buffered into a Vec first so iteration does not observe its own deletions.
macro_rules! clear_migration_table {
    ($ctx:expr, $include_tables:expr, $($table:ident),+ $(,)?) => {
        $(
            if should_include_table($include_tables, stringify!($table)) {
                for row in $ctx.db.$table().iter().collect::<Vec<_>>() {
                    $ctx.db.$table().delete(row);
                }
            }
        )+
    };
}
// Migration rights live in their own table so the ability to export private
// tables is never implicitly open to arbitrary logged-in identities.
/// Grants migration-operator rights to an identity; the inner function does
/// the validation and table write, this wrapper only shapes the result.
#[spacetimedb::procedure]
pub fn authorize_database_migration_operator(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationAuthorizeOperatorInput,
) -> DatabaseMigrationOperatorProcedureResult {
    let outcome = authorize_database_migration_operator_inner(ctx, input);
    // Map Ok/Err onto the flat result envelope expected by SDK callers.
    let (ok, operator_identity_hex, error_message) = match outcome {
        Ok(identity_hex) => (true, Some(identity_hex), None),
        Err(message) => (false, None, Some(message)),
    };
    DatabaseMigrationOperatorProcedureResult {
        ok,
        operator_identity_hex,
        error_message,
    }
}
/// Revokes migration-operator rights from an identity; the inner function does
/// the lookup and delete, this wrapper only shapes the result.
#[spacetimedb::procedure]
pub fn revoke_database_migration_operator(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationRevokeOperatorInput,
) -> DatabaseMigrationOperatorProcedureResult {
    let outcome = revoke_database_migration_operator_inner(ctx, input);
    // Map Ok/Err onto the flat result envelope expected by SDK callers.
    let (ok, operator_identity_hex, error_message) = match outcome {
        Ok(identity_hex) => (true, Some(identity_hex), None),
        Err(message) => (false, None, Some(message)),
    };
    DatabaseMigrationOperatorProcedureResult {
        ok,
        operator_identity_hex,
        error_message,
    }
}
// Export is a procedure returning a JSON string: reducers have no return value
// and cannot hand private-table contents back to external callers.
#[spacetimedb::procedure]
pub fn export_database_migration_to_file(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationExportInput,
) -> DatabaseMigrationProcedureResult {
    // Start from the failure shape and fill in the success fields on Ok.
    let mut result = DatabaseMigrationProcedureResult {
        ok: false,
        schema_version: MIGRATION_SCHEMA_VERSION,
        migration_json: None,
        table_stats: Vec::new(),
        error_message: None,
    };
    match export_database_migration_to_file_inner(ctx, input) {
        Ok((migration_json, stats)) => {
            result.ok = true;
            result.migration_json = Some(migration_json);
            result.table_stats = stats;
        }
        Err(message) => result.error_message = Some(message),
    }
    result
}
// The Node side reads the file and passes the JSON string in; this procedure
// only performs validation and the transactional table writes (strict mode:
// any insert conflict aborts the whole import).
#[spacetimedb::procedure]
pub fn import_database_migration_from_file(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationImportInput,
) -> DatabaseMigrationProcedureResult {
    // Start from the failure shape and fill in the success fields on Ok.
    let mut result = DatabaseMigrationProcedureResult {
        ok: false,
        schema_version: MIGRATION_SCHEMA_VERSION,
        migration_json: None,
        table_stats: Vec::new(),
        error_message: None,
    };
    match import_database_migration_from_file_inner(ctx, input, DatabaseMigrationImportMode::Strict)
    {
        Ok(stats) => {
            result.ok = true;
            result.table_stats = stats;
        }
        Err(message) => result.error_message = Some(message),
    }
    result
}
// Incremental import only inserts rows missing from the target database;
// rows hitting a primary-key/unique conflict are skipped, never updated.
#[spacetimedb::procedure]
pub fn import_database_migration_incremental_from_file(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationImportInput,
) -> DatabaseMigrationProcedureResult {
    let outcome = import_database_migration_from_file_inner(
        ctx,
        input,
        DatabaseMigrationImportMode::Incremental,
    );
    let (ok, table_stats, error_message) = match outcome {
        Ok(stats) => (true, stats, None),
        Err(message) => (false, Vec::new(), Some(message)),
    };
    DatabaseMigrationProcedureResult {
        ok,
        schema_version: MIGRATION_SCHEMA_VERSION,
        migration_json: None,
        table_stats,
        error_message,
    }
}
// Builds the export: the operator check and table snapshot share a single
// transaction; pretty-printing the JSON happens outside of it.
fn export_database_migration_to_file_inner(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationExportInput,
) -> Result<(String, Vec<DatabaseMigrationTableStat>), String> {
    let exporter = ctx.sender();
    let table_filter = normalize_include_tables(&input.include_tables)?;
    let exported_at_micros = ctx.timestamp.to_micros_since_unix_epoch();
    let migration_file = ctx.try_with_tx(|tx| {
        require_migration_operator(tx, exporter)?;
        build_migration_file(tx, exported_at_micros, table_filter.as_ref())
    })?;
    let table_stats = build_export_stats(&migration_file.tables);
    serde_json::to_string_pretty(&migration_file)
        .map(|serialized| (serialized, table_stats))
        .map_err(|error| format!("迁移文件序列化失败: {error}"))
}
// Shared implementation behind strict and incremental imports.
// Authorization is checked twice: once up-front (so unauthorized callers fail
// before the JSON payload is parsed) and again inside the write transaction.
fn import_database_migration_from_file_inner(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationImportInput,
    import_mode: DatabaseMigrationImportMode,
) -> Result<Vec<DatabaseMigrationTableStat>, String> {
    let caller = ctx.sender();
    let table_filter = normalize_include_tables(&input.include_tables)?;
    if import_mode == DatabaseMigrationImportMode::Incremental && input.replace_existing {
        return Err("增量导入不能同时启用 replace_existing".to_string());
    }
    if input.migration_json.trim().is_empty() {
        return Err("migration_json 不能为空".to_string());
    }
    ctx.try_with_tx(|tx| require_migration_operator(tx, caller))?;
    let migration_file: MigrationFile = serde_json::from_str(&input.migration_json)
        .map_err(|error| format!("迁移文件 JSON 解析失败: {error}"))?;
    if migration_file.schema_version != MIGRATION_SCHEMA_VERSION {
        return Err(format!(
            "迁移文件 schema_version 不匹配,期望 {},实际 {}",
            MIGRATION_SCHEMA_VERSION, migration_file.schema_version
        ));
    }
    // Dry runs never open the write transaction: they only report what would
    // be imported or skipped.
    if input.dry_run {
        return build_import_dry_run_stats(&migration_file.tables, table_filter.as_ref());
    }
    ctx.try_with_tx(|tx| {
        require_migration_operator(tx, caller)?;
        apply_migration_file(
            tx,
            &migration_file,
            table_filter.as_ref(),
            input.replace_existing,
            import_mode,
        )
    })
}
// Validates and normalizes the grant request, then performs the write inside a
// transaction so the bootstrap/operator check and the insert are atomic.
// Returns the granted operator's identity as a hex string.
fn authorize_database_migration_operator_inner(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationAuthorizeOperatorInput,
) -> Result<String, String> {
    let grantor = ctx.sender();
    let new_operator = parse_migration_operator_identity(&input.operator_identity_hex)?;
    let operator_note = normalize_migration_operator_note(&input.note)?;
    let trimmed_secret = input.bootstrap_secret.trim().to_string();
    ctx.try_with_tx(|tx| {
        authorize_database_migration_operator_tx(
            tx,
            grantor,
            new_operator,
            &trimmed_secret,
            operator_note.clone(),
        )
    })?;
    Ok(new_operator.to_hex().to_string())
}
// Deletes an existing operator grant; the caller must itself be an operator.
// Returns the revoked identity as a hex string.
fn revoke_database_migration_operator_inner(
    ctx: &mut ProcedureContext,
    input: DatabaseMigrationRevokeOperatorInput,
) -> Result<String, String> {
    let caller = ctx.sender();
    let target = parse_migration_operator_identity(&input.operator_identity_hex)?;
    ctx.try_with_tx(|tx| {
        require_migration_operator(tx, caller)?;
        match tx
            .db
            .database_migration_operator()
            .operator_identity()
            .find(&target)
        {
            None => Err("迁移操作员不存在".to_string()),
            Some(_) => {
                tx.db
                    .database_migration_operator()
                    .operator_identity()
                    .delete(&target);
                Ok(())
            }
        }
    })?;
    Ok(target.to_hex().to_string())
}
// Transactional body of the grant: the very first operator is created via the
// bootstrap secret; after that, only an existing operator may grant new ones.
fn authorize_database_migration_operator_tx(
    ctx: &ReducerContext,
    caller: Identity,
    operator_identity: Identity,
    bootstrap_secret: &str,
    note: String,
) -> Result<(), String> {
    match ctx.db.database_migration_operator().iter().next() {
        Some(_) => require_migration_operator(ctx, caller)?,
        None => require_migration_bootstrap_secret(bootstrap_secret)?,
    }
    // Re-granting an already-authorized identity replaces the old row so the
    // audit fields (created_at / created_by / note) are refreshed.
    let already_granted = ctx
        .db
        .database_migration_operator()
        .operator_identity()
        .find(&operator_identity)
        .is_some();
    if already_granted {
        ctx.db
            .database_migration_operator()
            .operator_identity()
            .delete(&operator_identity);
    }
    ctx.db
        .database_migration_operator()
        .insert(DatabaseMigrationOperator {
            operator_identity,
            created_at: ctx.timestamp,
            created_by: caller,
            note,
        });
    Ok(())
}
// Authorization check: the caller counts as a migration operator iff its
// identity is present in the `database_migration_operator` table.
fn require_migration_operator(ctx: &ReducerContext, caller: Identity) -> Result<(), String> {
    let authorized = ctx
        .db
        .database_migration_operator()
        .operator_identity()
        .find(&caller)
        .is_some();
    if authorized {
        return Ok(());
    }
    Err("当前 identity 未被授权执行数据库迁移".to_string())
}
/// Validates the bootstrap secret used to create the very first migration
/// operator.
///
/// Fails when the secret is not configured, the configured secret is shorter
/// than `MIGRATION_MIN_BOOTSTRAP_SECRET_LEN`, or the provided value does not
/// match.
fn require_migration_bootstrap_secret(input: &str) -> Result<(), String> {
    let configured_secret = MIGRATION_BOOTSTRAP_SECRET
        .map(str::trim)
        .filter(|secret| !secret.is_empty())
        .ok_or_else(|| "迁移引导密钥未配置,无法创建首个操作员".to_string())?;
    // Use the constant in the message instead of a hard-coded "16" so the
    // error text cannot drift from MIGRATION_MIN_BOOTSTRAP_SECRET_LEN.
    if configured_secret.chars().count() < MIGRATION_MIN_BOOTSTRAP_SECRET_LEN {
        return Err(format!(
            "迁移引导密钥长度不足,至少需要 {} 个字符",
            MIGRATION_MIN_BOOTSTRAP_SECRET_LEN
        ));
    }
    if input != configured_secret {
        return Err("迁移引导密钥不正确".to_string());
    }
    Ok(())
}
/// Parses a 64-hex-character SpacetimeDB identity, accepting at most one
/// optional leading `0x` prefix.
///
/// The original used `trim_start_matches("0x")`, which strips a *repeated*
/// prefix (so `"0x0x" + 64 hex chars` would be accepted); `strip_prefix`
/// removes exactly one occurrence.
fn parse_migration_operator_identity(input: &str) -> Result<Identity, String> {
    let trimmed = input.trim();
    let identity_hex = trimmed.strip_prefix("0x").unwrap_or(trimmed);
    if identity_hex.len() != 64 {
        return Err("operator_identity_hex 必须是 64 位十六进制 identity".to_string());
    }
    Identity::from_hex(identity_hex)
        .map_err(|error| format!("operator_identity_hex 格式不合法: {error}"))
}
// Trims the operator note and enforces the character-count cap
// (MIGRATION_MAX_OPERATOR_NOTE_CHARS).
fn normalize_migration_operator_note(input: &str) -> Result<String, String> {
    let trimmed = input.trim();
    // Count Unicode scalar values, not bytes, so CJK notes are measured fairly.
    if trimmed.chars().count() > MIGRATION_MAX_OPERATOR_NOTE_CHARS {
        return Err(format!(
            "迁移操作员备注过长,最多 {} 个字符",
            MIGRATION_MAX_OPERATOR_NOTE_CHARS
        ));
    }
    Ok(trimmed.to_string())
}
// Normalizes the requested table filter: `None` means "all whitelisted
// tables"; otherwise each non-empty name must pass length and whitelist
// validation.
fn normalize_include_tables(input: &[String]) -> Result<Option<HashSet<String>>, String> {
    if input.is_empty() {
        return Ok(None);
    }
    let selected = input
        .iter()
        .map(|raw_name| raw_name.trim())
        .filter(|name| !name.is_empty())
        .map(|name| {
            if name.len() > MIGRATION_MAX_TABLE_NAME_LEN {
                return Err(format!("迁移表名过长: {name}"));
            }
            if !is_supported_migration_table(name) {
                return Err(format!("迁移表不在白名单内: {name}"));
            }
            Ok(name.to_string())
        })
        .collect::<Result<HashSet<_>, _>>()?;
    Ok(Some(selected))
}
// Filter predicate: with no filter set, every table is included; otherwise the
// table must be a member of the filter set.
fn should_include_table(include_tables: Option<&HashSet<String>>, table_name: &str) -> bool {
    match include_tables {
        None => true,
        Some(tables) => tables.contains(table_name),
    }
}
// Snapshots every included whitelisted table into a `MigrationFile`.
// The macro expands to one export branch per table in the whitelist.
fn build_migration_file(
    ctx: &ReducerContext,
    exported_at_micros: i64,
    include_tables: Option<&HashSet<String>>,
) -> Result<MigrationFile, String> {
    let mut collected = Vec::new();
    collect_all_migration_tables!(ctx, include_tables, collected);
    Ok(MigrationFile {
        schema_version: MIGRATION_SCHEMA_VERSION,
        exported_at_micros,
        tables: collected,
    })
}
// Derives per-table export statistics; import/skip counters stay zero because
// this is the export side.
fn build_export_stats(tables: &[MigrationTable]) -> Vec<DatabaseMigrationTableStat> {
    let mut stats = Vec::with_capacity(tables.len());
    for table in tables {
        stats.push(DatabaseMigrationTableStat {
            table_name: table.name.clone(),
            exported_row_count: table.rows.len() as u64,
            imported_row_count: 0,
            skipped_row_count: 0,
        });
    }
    stats
}
// Dry-run statistics: included tables report their rows as would-be imports,
// excluded tables report the same rows as skipped. No table is written.
fn build_import_dry_run_stats(
    tables: &[MigrationTable],
    include_tables: Option<&HashSet<String>>,
) -> Result<Vec<DatabaseMigrationTableStat>, String> {
    let mut stats = Vec::with_capacity(tables.len());
    for table in tables {
        if !is_supported_migration_table(&table.name) {
            return Err(format!("迁移文件包含不支持的表: {}", table.name));
        }
        let row_count = table.rows.len() as u64;
        let (imported_row_count, skipped_row_count) =
            if should_include_table(include_tables, &table.name) {
                (row_count, 0)
            } else {
                (0, row_count)
            };
        stats.push(DatabaseMigrationTableStat {
            table_name: table.name.clone(),
            exported_row_count: 0,
            imported_row_count,
            skipped_row_count,
        });
    }
    Ok(stats)
}
// Transactional import body: validate the whole file first, optionally clear
// the tables that will be imported, then insert row by row.
fn apply_migration_file(
    ctx: &ReducerContext,
    migration_file: &MigrationFile,
    include_tables: Option<&HashSet<String>>,
    replace_existing: bool,
    import_mode: DatabaseMigrationImportMode,
) -> Result<Vec<DatabaseMigrationTableStat>, String> {
    // Reject the file up-front if any table is outside the whitelist, before
    // anything is cleared or written.
    if let Some(unknown) = migration_file
        .tables
        .iter()
        .find(|table| !is_supported_migration_table(&table.name))
    {
        return Err(format!("迁移文件包含不支持的表: {}", unknown.name));
    }
    let import_table_names = build_import_table_name_set(migration_file, include_tables);
    if replace_existing {
        // Only wipe tables this migration file will actually import, so
        // batched imports do not clear other whitelisted tables.
        clear_all_migration_tables!(ctx, Some(&import_table_names));
    }
    let mut stats = Vec::with_capacity(migration_file.tables.len());
    for table in &migration_file.tables {
        if !should_include_table(include_tables, &table.name) {
            stats.push(DatabaseMigrationTableStat {
                table_name: table.name.clone(),
                exported_row_count: 0,
                imported_row_count: 0,
                skipped_row_count: table.rows.len() as u64,
            });
            continue;
        }
        let (imported_row_count, skipped_row_count) =
            insert_migration_table_rows(ctx, table, import_mode)?;
        stats.push(DatabaseMigrationTableStat {
            table_name: table.name.clone(),
            exported_row_count: 0,
            imported_row_count,
            skipped_row_count,
        });
    }
    Ok(stats)
}
// Names of the tables this migration file will actually import, after the
// include filter is applied. Used to scope `replace_existing` clearing.
fn build_import_table_name_set(
    migration_file: &MigrationFile,
    include_tables: Option<&HashSet<String>>,
) -> HashSet<String> {
    let mut names = HashSet::new();
    for table in &migration_file.tables {
        if should_include_table(include_tables, &table.name) {
            names.insert(table.name.clone());
        }
    }
    names
}
// Serializes one table row to a JSON value. `SerializeWrapper` bridges
// SpacetimeDB's serialization trait onto serde.
fn row_to_json<T: spacetimedb::Serialize>(row: &T) -> Result<serde_json::Value, String> {
    match serde_json::to_value(SerializeWrapper::from_ref(row)) {
        Ok(value) => Ok(value),
        Err(error) => Err(format!("迁移行序列化失败: {error}")),
    }
}
// Deserializes one JSON value back into a table row via `DeserializeWrapper`,
// which bridges serde onto SpacetimeDB's deserialization trait.
fn row_from_json<T>(value: &serde_json::Value) -> Result<T, String>
where
    T: for<'de> spacetimedb::Deserialize<'de>,
{
    serde_json::from_value::<DeserializeWrapper<T>>(value.clone())
        .map(|wrapped| wrapped.0)
        .map_err(|error| format!("迁移行反序列化失败: {error}"))
}
/// Inserts every row of `table` into the matching SpacetimeDB table.
///
/// Returns `(imported, skipped)` counts. In `Strict` mode the first failing
/// `try_insert` (e.g. a primary-key/unique conflict) aborts the import with an
/// error; in `Incremental` mode a failing row is counted as skipped and the
/// import continues, so existing rows are never overwritten.
fn insert_migration_table_rows(
    ctx: &ReducerContext,
    table: &MigrationTable,
    import_mode: DatabaseMigrationImportMode,
) -> Result<(u64, u64), String> {
    // The macro expands to one match arm per whitelisted table so that each
    // arm deserializes into that table's concrete row type.
    macro_rules! insert_table_match_arm {
        ($($table:ident),+ $(,)?) => {
            match table.name.as_str() {
                $(
                    stringify!($table) => {
                        let mut imported = 0u64;
                        let mut skipped = 0u64;
                        for value in &table.rows {
                            let row = row_from_json(value)
                                .map_err(|error| format!("{}: {error}", stringify!($table)))?;
                            let insert_result = ctx.db
                                .$table()
                                .try_insert(row);
                            match insert_result {
                                Ok(_) => imported = imported.saturating_add(1),
                                Err(error) => {
                                    if import_mode == DatabaseMigrationImportMode::Incremental {
                                        skipped = skipped.saturating_add(1);
                                    } else {
                                        return Err(format!("{} 导入失败: {error}", stringify!($table)));
                                    }
                                }
                            }
                        }
                        Ok((imported, skipped))
                    }
                )+
                // Caller is expected to have validated the name already; this
                // is the defensive fallback.
                _ => Err(format!("迁移表不在白名单内: {}", table.name)),
            }
        };
    }
    migration_tables!(insert_table_match_arm)
}
/// Whitelist check: true iff `table_name` is one of the tables the migration
/// export/import macros know about.
fn is_supported_migration_table(table_name: &str) -> bool {
    // `migration_tables!` feeds the full whitelist into `matches!`, so this
    // check cannot drift from the set of tables the other macros handle.
    macro_rules! supported_table_match {
        ($($table:ident),+ $(,)?) => {
            matches!(
                table_name,
                $(stringify!($table))|+
            )
        };
    }
    migration_tables!(supported_table_match)
}