feat: complete M7 cutover preparation

This commit is contained in:
2026-04-22 17:46:50 +08:00
parent 4c8ba535e4
commit 2fe0a9083d
19 changed files with 8258 additions and 7809 deletions

View File

@@ -14,7 +14,7 @@
## 2. 当前阶段说明
当前目录已经完成以下三十九项初始化:
1. 为新后端预留正式目录并把路径固定到仓库结构中。
2. 创建虚拟 workspace `Cargo.toml`,后续 crate 会逐项挂入。
@@ -52,6 +52,9 @@
34. 创建 `scripts/spacetime-dev.ps1`,固定 Windows 本地 SpacetimeDB 启动入口。
35. 创建 `scripts/spacetime-dev.sh`,固定 Unix-like 本地 SpacetimeDB 启动入口。
36. 创建 `scripts/oss-smoke.ps1`,固定 Windows 本地阿里云 OSS 真实联调入口。
37. 创建 `scripts/m7-preflight.ps1`,固定 M7 切流前 Rust 后端预检入口。
38. 创建根目录 `scripts/m7-api-compare.ts`,固定旧 Node 与新 Rust 的无状态 API contract 对比入口。
39. 固定 Vite dev proxy 的 `GENARRATIVE_BACKEND_STACK` / `GENARRATIVE_RUNTIME_SERVER_TARGET` 切流和回退开关。
后续任务会继续在本目录内按顺序补齐:

View File

@@ -6,8 +6,11 @@ use axum::{
middleware,
routing::{get, post},
};
use tower_http::trace::{DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, TraceLayer};
use tracing::{Level, info_span};
use tower_http::{
classify::ServerErrorsFailureClass,
trace::{DefaultOnRequest, TraceLayer},
};
use tracing::{Level, Span, error, info, info_span, warn};
use crate::{
ai_tasks::{
@@ -86,6 +89,8 @@ use crate::{
// 统一由这里构造 Axum 路由树,后续再逐项挂接中间件与业务路由。
pub fn build_router(state: AppState) -> Router {
let slow_request_threshold_ms = state.config.slow_request_threshold_ms;
Router::new()
.route(
"/healthz",
@@ -688,8 +693,39 @@ pub fn build_router(state: AppState) -> Router {
)
})
.on_request(DefaultOnRequest::new().level(Level::INFO))
.on_response(DefaultOnResponse::new().level(Level::INFO))
.on_failure(DefaultOnFailure::new().level(Level::ERROR)),
.on_response(move |response: &axum::response::Response, latency: std::time::Duration, span: &Span| {
let latency_ms = latency.as_millis().min(u64::MAX as u128) as u64;
let status = response.status().as_u16();
let slow_request = latency_ms >= slow_request_threshold_ms;
span.record("status", status);
span.record("latency_ms", latency_ms);
if slow_request {
warn!(
parent: span,
status,
latency_ms,
slow_request = true,
"http request completed slowly"
);
} else {
info!(
parent: span,
status,
latency_ms,
slow_request = false,
"http request completed"
);
}
})
.on_failure(|failure: ServerErrorsFailureClass, latency: std::time::Duration, span: &Span| {
let latency_ms = latency.as_millis().min(u64::MAX as u128) as u64;
error!(
parent: span,
latency_ms,
failure = %failure,
"http request failed"
);
}),
)
// request_id 中间件先进入请求链,确保后续 tracing、错误处理和响应头层都能复用同一份请求标识。
.layer(middleware::from_fn(attach_request_context))

View File

@@ -54,6 +54,7 @@ pub struct AppConfig {
pub llm_request_timeout_ms: u64,
pub llm_max_retries: u32,
pub llm_retry_backoff_ms: u64,
pub slow_request_threshold_ms: u64,
}
impl Default for AppConfig {
@@ -104,6 +105,7 @@ impl Default for AppConfig {
llm_request_timeout_ms: DEFAULT_REQUEST_TIMEOUT_MS,
llm_max_retries: DEFAULT_MAX_RETRIES,
llm_retry_backoff_ms: DEFAULT_RETRY_BACKOFF_MS,
slow_request_threshold_ms: 1_000,
}
}
}
@@ -305,6 +307,12 @@ impl AppConfig {
config.llm_retry_backoff_ms = llm_retry_backoff_ms;
}
if let Some(slow_request_threshold_ms) =
read_first_positive_u64_env(&["GENARRATIVE_SLOW_REQUEST_THRESHOLD_MS"])
{
config.slow_request_threshold_ms = slow_request_threshold_ms;
}
config
}

View File

@@ -0,0 +1,753 @@
// Truth row for a single AI task. Secondary btree indexes cover the three
// expected filter axes: owner, status, and task kind.
#[spacetimedb::table(
    accessor = ai_task,
    index(accessor = by_ai_task_owner_user_id, btree(columns = [owner_user_id])),
    index(accessor = by_ai_task_status, btree(columns = [status])),
    index(accessor = by_ai_task_kind, btree(columns = [task_kind]))
)]
pub struct AiTask {
    #[primary_key]
    task_id: String,
    task_kind: AiTaskKind,
    owner_user_id: String,
    request_label: String,
    source_module: String,
    // Optional linkage back to the originating entity / raw request payload.
    source_entity_id: Option<String>,
    request_payload_json: Option<String>,
    status: AiTaskStatus,
    failure_message: Option<String>,
    // Most recent aggregated outputs, denormalized here for cheap reads.
    latest_text_output: Option<String>,
    latest_structured_payload_json: Option<String>,
    // Monotonic version, bumped on every snapshot persist.
    version: u32,
    created_at: Timestamp,
    started_at: Option<Timestamp>,
    completed_at: Option<Timestamp>,
    updated_at: Timestamp,
}
// One pipeline stage of an AI task. Rows are fully rebuilt whenever the parent
// task snapshot is persisted (see replace_ai_task_stages).
#[spacetimedb::table(
    accessor = ai_task_stage,
    index(accessor = by_ai_task_stage_task_id, btree(columns = [task_id])),
    index(accessor = by_ai_task_stage_task_order, btree(columns = [task_id, stage_order]))
)]
pub struct AiTaskStage {
    #[primary_key]
    task_stage_id: String,
    task_id: String,
    stage_kind: AiTaskStageKind,
    label: String,
    detail: String,
    // Display/sort order within the parent task.
    stage_order: u32,
    status: AiTaskStageStatus,
    text_output: Option<String>,
    structured_payload_json: Option<String>,
    warning_messages: Vec<String>,
    started_at: Option<Timestamp>,
    completed_at: Option<Timestamp>,
}
// One streamed text delta. The (task_id, stage_kind, sequence) index supports
// ordered re-aggregation of a stage's streamed output.
#[spacetimedb::table(
    accessor = ai_text_chunk,
    index(accessor = by_ai_text_chunk_task_id, btree(columns = [task_id])),
    index(
        accessor = by_ai_text_chunk_task_stage_sequence,
        btree(columns = [task_id, stage_kind, sequence])
    )
)]
pub struct AiTextChunk {
    #[primary_key]
    text_chunk_row_id: String,
    chunk_id: String,
    task_id: String,
    stage_kind: AiTaskStageKind,
    // 1-based ordering within the stage; 0 is rejected at write time.
    sequence: u32,
    delta_text: String,
    created_at: Timestamp,
}
// A pointer from a finished/running task to a produced artifact elsewhere.
#[spacetimedb::table(
    accessor = ai_result_reference,
    index(accessor = by_ai_result_reference_task_id, btree(columns = [task_id]))
)]
pub struct AiResultReference {
    #[primary_key]
    result_reference_row_id: String,
    result_ref_id: String,
    task_id: String,
    reference_kind: AiResultReferenceKind,
    // Id of the referenced entity; kind determines which table it lives in.
    reference_id: String,
    label: Option<String>,
    created_at: Timestamp,
}
// AI tasks stay in a private truth table for now; Axum / platform-llm will wrap
// the HTTP and SSE protocol on top later.
#[spacetimedb::reducer]
pub fn create_ai_task(ctx: &ReducerContext, input: AiTaskCreateInput) -> Result<(), String> {
    // Reducer form: run the shared tx helper and drop the returned snapshot.
    create_ai_task_tx(ctx, input)?;
    Ok(())
}
// Procedure form of task creation: returns the persisted snapshot so the caller
// never has to re-read the private table.
#[spacetimedb::procedure]
pub fn create_ai_task_and_return(
    ctx: &mut ProcedureContext,
    input: AiTaskCreateInput,
) -> AiTaskProcedureResult {
    // Fold the Ok/Err outcome into the flat procedure result shape.
    let (task, error_message) = match ctx.try_with_tx(|tx| create_ai_task_tx(tx, input.clone())) {
        Ok(task) => (Some(task), None),
        Err(message) => (None, Some(message)),
    };
    AiTaskProcedureResult {
        ok: error_message.is_none(),
        task,
        text_chunk: None,
        error_message,
    }
}
// Reducer form of task start; discards the snapshot returned by the tx helper.
#[spacetimedb::reducer]
pub fn start_ai_task(ctx: &ReducerContext, input: AiTaskStartInput) -> Result<(), String> {
    start_ai_task_tx(ctx, input)?;
    Ok(())
}
// Reducer form of stage start; discards the snapshot returned by the tx helper.
#[spacetimedb::reducer]
pub fn start_ai_task_stage(
    ctx: &ReducerContext,
    input: AiTaskStageStartInput,
) -> Result<(), String> {
    start_ai_task_stage_tx(ctx, input)?;
    Ok(())
}
// Streaming appends return both the stored chunk and the aggregated task
// snapshot, so the later Axum facade can reuse them directly.
#[spacetimedb::procedure]
pub fn append_ai_text_chunk_and_return(
    ctx: &mut ProcedureContext,
    input: AiTextChunkAppendInput,
) -> AiTaskProcedureResult {
    let (payload, error_message) =
        match ctx.try_with_tx(|tx| append_ai_text_chunk_tx(tx, input.clone())) {
            Ok(pair) => (Some(pair), None),
            Err(message) => (None, Some(message)),
        };
    // Split the optional (task, chunk) pair into the two result fields.
    let (task, text_chunk) = payload.unzip();
    AiTaskProcedureResult {
        ok: error_message.is_none(),
        task,
        text_chunk,
        error_message,
    }
}
// Procedure form of stage completion; returns the updated task snapshot.
#[spacetimedb::procedure]
pub fn complete_ai_stage_and_return(
    ctx: &mut ProcedureContext,
    input: AiStageCompletionInput,
) -> AiTaskProcedureResult {
    let (task, error_message) = match ctx.try_with_tx(|tx| complete_ai_stage_tx(tx, input.clone())) {
        Ok(task) => (Some(task), None),
        Err(message) => (None, Some(message)),
    };
    AiTaskProcedureResult {
        ok: error_message.is_none(),
        task,
        text_chunk: None,
        error_message,
    }
}
// Procedure form of attaching a result reference; returns the updated snapshot.
#[spacetimedb::procedure]
pub fn attach_ai_result_reference_and_return(
    ctx: &mut ProcedureContext,
    input: AiResultReferenceInput,
) -> AiTaskProcedureResult {
    let (task, error_message) =
        match ctx.try_with_tx(|tx| attach_ai_result_reference_tx(tx, input.clone())) {
            Ok(task) => (Some(task), None),
            Err(message) => (None, Some(message)),
        };
    AiTaskProcedureResult {
        ok: error_message.is_none(),
        task,
        text_chunk: None,
        error_message,
    }
}
// Procedure form of task completion; returns the terminal snapshot.
#[spacetimedb::procedure]
pub fn complete_ai_task_and_return(
    ctx: &mut ProcedureContext,
    input: AiTaskFinishInput,
) -> AiTaskProcedureResult {
    let (task, error_message) = match ctx.try_with_tx(|tx| complete_ai_task_tx(tx, input.clone())) {
        Ok(task) => (Some(task), None),
        Err(message) => (None, Some(message)),
    };
    AiTaskProcedureResult {
        ok: error_message.is_none(),
        task,
        text_chunk: None,
        error_message,
    }
}
// Procedure form of task failure; returns the terminal snapshot.
#[spacetimedb::procedure]
pub fn fail_ai_task_and_return(
    ctx: &mut ProcedureContext,
    input: AiTaskFailureInput,
) -> AiTaskProcedureResult {
    let (task, error_message) = match ctx.try_with_tx(|tx| fail_ai_task_tx(tx, input.clone())) {
        Ok(task) => (Some(task), None),
        Err(message) => (None, Some(message)),
    };
    AiTaskProcedureResult {
        ok: error_message.is_none(),
        task,
        text_chunk: None,
        error_message,
    }
}
// Procedure form of task cancellation; returns the terminal snapshot.
#[spacetimedb::procedure]
pub fn cancel_ai_task_and_return(
    ctx: &mut ProcedureContext,
    input: AiTaskCancelInput,
) -> AiTaskProcedureResult {
    let (task, error_message) = match ctx.try_with_tx(|tx| cancel_ai_task_tx(tx, input.clone())) {
        Ok(task) => (Some(task), None),
        Err(message) => (None, Some(message)),
    };
    AiTaskProcedureResult {
        ok: error_message.is_none(),
        task,
        text_chunk: None,
        error_message,
    }
}
// Creates the task row plus its stage rows inside one transaction scope.
// Validation and the uniqueness check both run before any row is written.
fn create_ai_task_tx(
    ctx: &ReducerContext,
    input: AiTaskCreateInput,
) -> Result<AiTaskSnapshot, String> {
    validate_task_create_input(&input).map_err(|error| error.to_string())?;
    let already_exists = ctx.db.ai_task().task_id().find(&input.task_id).is_some();
    if already_exists {
        return Err("ai_task.task_id 已存在".to_string());
    }
    let task_snapshot = build_ai_task_snapshot_from_create_input(&input);
    ctx.db.ai_task().insert(build_ai_task_row(&task_snapshot));
    replace_ai_task_stages(ctx, &task_snapshot.task_id, &task_snapshot.stages);
    // Re-read through the normal snapshot path so the caller gets exactly what was persisted.
    get_ai_task_snapshot_tx(ctx, &task_snapshot.task_id)
}
// Moves a non-terminal task into Running. started_at is stamped only on the
// first start; repeated starts just bump updated_at and the version.
fn start_ai_task_tx(
    ctx: &ReducerContext,
    input: AiTaskStartInput,
) -> Result<AiTaskSnapshot, String> {
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    snapshot.status = AiTaskStatus::Running;
    snapshot.started_at_micros = snapshot.started_at_micros.or(Some(input.started_at_micros));
    snapshot.updated_at_micros = input.started_at_micros;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok(snapshot)
}
// Moves one stage (and implicitly the whole task) into Running.
// Note: `stage` mutably borrows `snapshot.stages`; the code only touches
// disjoint `snapshot.*` fields while that borrow is live.
fn start_ai_task_stage_tx(
    ctx: &ReducerContext,
    input: AiTaskStageStartInput,
) -> Result<AiTaskSnapshot, String> {
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    // Stages are addressed by kind, not by id, within a task.
    let stage = snapshot
        .stages
        .iter_mut()
        .find(|stage| stage.stage_kind == input.stage_kind)
        .ok_or_else(|| "ai_task.stage 不存在".to_string())?;
    snapshot.status = AiTaskStatus::Running;
    // Both timestamps are first-activity-only: never overwritten once set.
    if snapshot.started_at_micros.is_none() {
        snapshot.started_at_micros = Some(input.started_at_micros);
    }
    stage.status = AiTaskStageStatus::Running;
    if stage.started_at_micros.is_none() {
        stage.started_at_micros = Some(input.started_at_micros);
    }
    snapshot.updated_at_micros = input.started_at_micros;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok(snapshot)
}
// Persists one streamed text delta, then refreshes the task snapshot with the
// re-aggregated stage text. Returns both the updated snapshot and the chunk.
// Note: `stage` mutably borrows `snapshot.stages`; only disjoint `snapshot.*`
// fields are written while it is live.
fn append_ai_text_chunk_tx(
    ctx: &ReducerContext,
    input: AiTextChunkAppendInput,
) -> Result<(AiTaskSnapshot, AiTextChunkSnapshot), String> {
    // Reject blank deltas and the reserved sequence value 0 before touching rows.
    if input.delta_text.trim().is_empty() {
        return Err("ai_text_chunk.delta_text 不能为空".to_string());
    }
    if input.sequence == 0 {
        return Err("ai_text_chunk.sequence 必须大于 0".to_string());
    }
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    let stage = snapshot
        .stages
        .iter_mut()
        .find(|stage| stage.stage_kind == input.stage_kind)
        .ok_or_else(|| "ai_task.stage 不存在".to_string())?;
    let chunk = AiTextChunkSnapshot {
        chunk_id: generate_ai_text_chunk_id(input.created_at_micros, input.sequence),
        task_id: input.task_id.trim().to_string(),
        stage_kind: input.stage_kind,
        sequence: input.sequence,
        delta_text: input.delta_text.trim().to_string(),
        created_at_micros: input.created_at_micros,
    };
    ctx.db
        .ai_text_chunk()
        .insert(build_ai_text_chunk_row(&chunk));
    // Aggregation re-reads all chunks for this (task, stage), including the one just inserted.
    let aggregated_text = collect_ai_stage_text_output(ctx, &chunk.task_id, chunk.stage_kind);
    // A delta implicitly moves task and stage into Running; started_at is first-activity-only.
    snapshot.status = AiTaskStatus::Running;
    if snapshot.started_at_micros.is_none() {
        snapshot.started_at_micros = Some(input.created_at_micros);
    }
    stage.status = AiTaskStageStatus::Running;
    if stage.started_at_micros.is_none() {
        stage.started_at_micros = Some(input.created_at_micros);
    }
    stage.text_output = aggregated_text.clone();
    snapshot.latest_text_output = aggregated_text;
    snapshot.updated_at_micros = input.created_at_micros;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok((snapshot, chunk))
}
// Marks one stage Completed and copies its final outputs onto the task-level
// "latest" fields.
// NOTE(review): when `input.text_output` is None this overwrites
// `latest_text_output` (and the structured payload) with None, discarding any
// previously streamed aggregate — confirm this reset is intended.
fn complete_ai_stage_tx(
    ctx: &ReducerContext,
    input: AiStageCompletionInput,
) -> Result<AiTaskSnapshot, String> {
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    let stage = snapshot
        .stages
        .iter_mut()
        .find(|stage| stage.stage_kind == input.stage_kind)
        .ok_or_else(|| "ai_task.stage 不存在".to_string())?;
    stage.status = AiTaskStageStatus::Completed;
    stage.completed_at_micros = Some(input.completed_at_micros);
    // Final outputs replace whatever streaming left behind on the stage.
    stage.text_output = normalize_optional_text(input.text_output.clone());
    stage.structured_payload_json = normalize_optional_text(input.structured_payload_json.clone());
    stage.warning_messages = normalize_string_list(input.warning_messages.clone());
    snapshot.latest_text_output = stage.text_output.clone();
    snapshot.latest_structured_payload_json = stage.structured_payload_json.clone();
    snapshot.updated_at_micros = input.completed_at_micros;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok(snapshot)
}
// Attaches one produced-artifact reference to a non-terminal task.
// The reference row is inserted directly; only the task row goes through the
// snapshot-persist path.
fn attach_ai_result_reference_tx(
    ctx: &ReducerContext,
    input: AiResultReferenceInput,
) -> Result<AiTaskSnapshot, String> {
    // Blank reference ids are rejected before the task row is even read.
    let trimmed_reference_id = input.reference_id.trim().to_string();
    if trimmed_reference_id.is_empty() {
        return Err("ai_result_reference.reference_id 不能为空".to_string());
    }
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    let reference = AiResultReferenceSnapshot {
        result_ref_id: generate_ai_result_ref_id(input.created_at_micros),
        task_id: input.task_id.trim().to_string(),
        reference_kind: input.reference_kind,
        reference_id: trimmed_reference_id,
        label: normalize_optional_text(input.label),
        created_at_micros: input.created_at_micros,
    };
    ctx.db
        .ai_result_reference()
        .insert(build_ai_result_reference_row(&reference));
    snapshot.result_references.push(reference);
    snapshot.updated_at_micros = input.created_at_micros;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok(snapshot)
}
// Terminal transition to Completed: stamps completion/update times and bumps
// the version once.
fn complete_ai_task_tx(
    ctx: &ReducerContext,
    input: AiTaskFinishInput,
) -> Result<AiTaskSnapshot, String> {
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    let finished_at = input.completed_at_micros;
    snapshot.status = AiTaskStatus::Completed;
    snapshot.completed_at_micros = Some(finished_at);
    snapshot.updated_at_micros = finished_at;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok(snapshot)
}
// Terminal transition to Failed; a non-blank failure message is mandatory.
fn fail_ai_task_tx(
    ctx: &ReducerContext,
    input: AiTaskFailureInput,
) -> Result<AiTaskSnapshot, String> {
    let trimmed_message = input.failure_message.trim();
    if trimmed_message.is_empty() {
        return Err("ai_task.failure_message 不能为空".to_string());
    }
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    snapshot.status = AiTaskStatus::Failed;
    snapshot.failure_message = Some(trimmed_message.to_string());
    snapshot.completed_at_micros = Some(input.completed_at_micros);
    snapshot.updated_at_micros = input.completed_at_micros;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok(snapshot)
}
// Terminal transition to Cancelled; shares the same timestamp/version rules as
// completion and failure.
fn cancel_ai_task_tx(
    ctx: &ReducerContext,
    input: AiTaskCancelInput,
) -> Result<AiTaskSnapshot, String> {
    let mut snapshot = get_ai_task_snapshot_tx(ctx, &input.task_id)?;
    ensure_ai_task_can_transition(snapshot.status)?;
    let cancelled_at = input.completed_at_micros;
    snapshot.status = AiTaskStatus::Cancelled;
    snapshot.completed_at_micros = Some(cancelled_at);
    snapshot.updated_at_micros = cancelled_at;
    snapshot.version += 1;
    persist_ai_task_snapshot(ctx, &snapshot)?;
    Ok(snapshot)
}
// Primary-key lookup on the trimmed id; a missing row surfaces as a business
// error string rather than a panic.
fn get_ai_task_snapshot_tx(ctx: &ReducerContext, task_id: &str) -> Result<AiTaskSnapshot, String> {
    let needle = task_id.trim().to_string();
    match ctx.db.ai_task().task_id().find(&needle) {
        Some(row) => Ok(build_ai_task_snapshot_from_row(ctx, &row)),
        None => Err("ai_task 不存在".to_string()),
    }
}
// Snapshot persistence is a full replace: delete-then-insert the task row, then
// rebuild all of its stage rows. Always succeeds; Result kept for call-site symmetry.
fn persist_ai_task_snapshot(ctx: &ReducerContext, snapshot: &AiTaskSnapshot) -> Result<(), String> {
    let row = build_ai_task_row(snapshot);
    ctx.db.ai_task().task_id().delete(&snapshot.task_id);
    ctx.db.ai_task().insert(row);
    replace_ai_task_stages(ctx, &snapshot.task_id, &snapshot.stages);
    Ok(())
}
// Drops every stage row belonging to `task_id`, then reinserts the given set.
// The full-table scan is the frozen minimal implementation; the btree index on
// task_id can replace it later.
fn replace_ai_task_stages(ctx: &ReducerContext, task_id: &str, stages: &[AiTaskStageSnapshot]) {
    // Collect ids first so deletion does not run while iterating the table.
    let stale_ids: Vec<String> = ctx
        .db
        .ai_task_stage()
        .iter()
        .filter(|row| row.task_id == task_id)
        .map(|row| row.task_stage_id.clone())
        .collect();
    for stale_id in &stale_ids {
        ctx.db.ai_task_stage().task_stage_id().delete(stale_id);
    }
    stages.iter().for_each(|stage| {
        ctx.db
            .ai_task_stage()
            .insert(build_ai_task_stage_row(task_id, stage));
    });
}
fn collect_ai_stage_text_output(
ctx: &ReducerContext,
task_id: &str,
stage_kind: AiTaskStageKind,
) -> Option<String> {
let mut chunks = ctx
.db
.ai_text_chunk()
.iter()
.filter(|row| row.task_id == task_id && row.stage_kind == stage_kind)
.map(|row| build_ai_text_chunk_snapshot_from_row(&row))
.collect::<Vec<_>>();
chunks.sort_by_key(|chunk| chunk.sequence);
let aggregated = chunks
.into_iter()
.map(|chunk| chunk.delta_text)
.collect::<Vec<_>>()
.join("");
if aggregated.trim().is_empty() {
None
} else {
Some(aggregated)
}
}
// Terminal states are frozen: Completed/Failed/Cancelled tasks reject every
// further write; all other states pass through.
fn ensure_ai_task_can_transition(status: AiTaskStatus) -> Result<(), String> {
    match status {
        AiTaskStatus::Completed | AiTaskStatus::Failed | AiTaskStatus::Cancelled => {
            Err("当前 ai_task 状态不允许执行该操作".to_string())
        }
        _ => Ok(()),
    }
}
// Builds the initial in-memory snapshot for a brand-new task: status Pending,
// all stages Pending, no outputs, version at the initial constant, and both
// created/updated stamps taken from the creation time.
fn build_ai_task_snapshot_from_create_input(input: &AiTaskCreateInput) -> AiTaskSnapshot {
    AiTaskSnapshot {
        // Free-text ids and labels are trimmed once, at the creation boundary.
        task_id: input.task_id.trim().to_string(),
        task_kind: input.task_kind,
        owner_user_id: input.owner_user_id.trim().to_string(),
        request_label: input.request_label.trim().to_string(),
        source_module: input.source_module.trim().to_string(),
        source_entity_id: normalize_optional_text(input.source_entity_id.clone()),
        request_payload_json: normalize_optional_text(input.request_payload_json.clone()),
        status: AiTaskStatus::Pending,
        failure_message: None,
        stages: input
            .stages
            .iter()
            .map(|stage| AiTaskStageSnapshot {
                stage_kind: stage.stage_kind,
                label: stage.label.trim().to_string(),
                detail: stage.detail.trim().to_string(),
                order: stage.order,
                status: AiTaskStageStatus::Pending,
                text_output: None,
                structured_payload_json: None,
                warning_messages: Vec::new(),
                started_at_micros: None,
                completed_at_micros: None,
            })
            .collect(),
        result_references: Vec::new(),
        latest_text_output: None,
        latest_structured_payload_json: None,
        version: INITIAL_AI_TASK_VERSION,
        created_at_micros: input.created_at_micros,
        started_at_micros: None,
        completed_at_micros: None,
        updated_at_micros: input.created_at_micros,
    }
}
// Field-for-field mapping from the in-memory snapshot to the persisted row.
// Micros-since-epoch values convert to `Timestamp` at this boundary.
fn build_ai_task_row(snapshot: &AiTaskSnapshot) -> AiTask {
    AiTask {
        task_id: snapshot.task_id.clone(),
        task_kind: snapshot.task_kind,
        owner_user_id: snapshot.owner_user_id.clone(),
        request_label: snapshot.request_label.clone(),
        source_module: snapshot.source_module.clone(),
        source_entity_id: snapshot.source_entity_id.clone(),
        request_payload_json: snapshot.request_payload_json.clone(),
        status: snapshot.status,
        failure_message: snapshot.failure_message.clone(),
        latest_text_output: snapshot.latest_text_output.clone(),
        latest_structured_payload_json: snapshot.latest_structured_payload_json.clone(),
        version: snapshot.version,
        created_at: Timestamp::from_micros_since_unix_epoch(snapshot.created_at_micros),
        started_at: snapshot
            .started_at_micros
            .map(Timestamp::from_micros_since_unix_epoch),
        completed_at: snapshot
            .completed_at_micros
            .map(Timestamp::from_micros_since_unix_epoch),
        updated_at: Timestamp::from_micros_since_unix_epoch(snapshot.updated_at_micros),
    }
}
// Rehydrates a full snapshot from the task row plus its stage and reference
// rows. Stages sort by their explicit order; references by creation time.
// Both child scans are full-table filters (the frozen minimal implementation).
fn build_ai_task_snapshot_from_row(ctx: &ReducerContext, row: &AiTask) -> AiTaskSnapshot {
    let mut stages = ctx
        .db
        .ai_task_stage()
        .iter()
        .filter(|stage| stage.task_id == row.task_id)
        .map(|stage| build_ai_task_stage_snapshot_from_row(&stage))
        .collect::<Vec<_>>();
    stages.sort_by_key(|stage| stage.order);
    let mut result_references = ctx
        .db
        .ai_result_reference()
        .iter()
        .filter(|reference| reference.task_id == row.task_id)
        .map(|reference| build_ai_result_reference_snapshot_from_row(&reference))
        .collect::<Vec<_>>();
    result_references.sort_by_key(|reference| reference.created_at_micros);
    AiTaskSnapshot {
        task_id: row.task_id.clone(),
        task_kind: row.task_kind,
        owner_user_id: row.owner_user_id.clone(),
        request_label: row.request_label.clone(),
        source_module: row.source_module.clone(),
        source_entity_id: row.source_entity_id.clone(),
        request_payload_json: row.request_payload_json.clone(),
        status: row.status,
        failure_message: row.failure_message.clone(),
        stages,
        result_references,
        latest_text_output: row.latest_text_output.clone(),
        latest_structured_payload_json: row.latest_structured_payload_json.clone(),
        version: row.version,
        created_at_micros: row.created_at.to_micros_since_unix_epoch(),
        started_at_micros: row
            .started_at
            .map(|value| value.to_micros_since_unix_epoch()),
        completed_at_micros: row
            .completed_at
            .map(|value| value.to_micros_since_unix_epoch()),
        updated_at_micros: row.updated_at.to_micros_since_unix_epoch(),
    }
}
// Maps a stage snapshot to its persisted row. The row id is derived from
// (task_id, stage_kind), which makes the rebuild in replace_ai_task_stages
// idempotent per stage kind.
fn build_ai_task_stage_row(task_id: &str, snapshot: &AiTaskStageSnapshot) -> AiTaskStage {
    AiTaskStage {
        task_stage_id: generate_ai_task_stage_id(task_id, snapshot.stage_kind),
        task_id: task_id.to_string(),
        stage_kind: snapshot.stage_kind,
        label: snapshot.label.clone(),
        detail: snapshot.detail.clone(),
        stage_order: snapshot.order,
        status: snapshot.status,
        text_output: snapshot.text_output.clone(),
        structured_payload_json: snapshot.structured_payload_json.clone(),
        warning_messages: snapshot.warning_messages.clone(),
        started_at: snapshot
            .started_at_micros
            .map(Timestamp::from_micros_since_unix_epoch),
        completed_at: snapshot
            .completed_at_micros
            .map(Timestamp::from_micros_since_unix_epoch),
    }
}
// Inverse of build_ai_task_stage_row: row -> in-memory stage snapshot.
// Note the row's synthetic id is intentionally dropped; snapshots address
// stages by kind.
fn build_ai_task_stage_snapshot_from_row(row: &AiTaskStage) -> AiTaskStageSnapshot {
    AiTaskStageSnapshot {
        stage_kind: row.stage_kind,
        label: row.label.clone(),
        detail: row.detail.clone(),
        order: row.stage_order,
        status: row.status,
        text_output: row.text_output.clone(),
        structured_payload_json: row.structured_payload_json.clone(),
        warning_messages: row.warning_messages.clone(),
        started_at_micros: row
            .started_at
            .map(|value| value.to_micros_since_unix_epoch()),
        completed_at_micros: row
            .completed_at
            .map(|value| value.to_micros_since_unix_epoch()),
    }
}
// Maps a chunk snapshot to its persisted row. The primary key is derived from
// prefix + task + stage + sequence, so re-inserting the same sequence for the
// same stage collides on the primary key.
fn build_ai_text_chunk_row(snapshot: &AiTextChunkSnapshot) -> AiTextChunk {
    AiTextChunk {
        text_chunk_row_id: format!(
            "{}{}_{}_{}",
            AI_TEXT_CHUNK_ID_PREFIX,
            snapshot.task_id,
            snapshot.stage_kind.as_str(),
            snapshot.sequence
        ),
        chunk_id: snapshot.chunk_id.clone(),
        task_id: snapshot.task_id.clone(),
        stage_kind: snapshot.stage_kind,
        sequence: snapshot.sequence,
        delta_text: snapshot.delta_text.clone(),
        created_at: Timestamp::from_micros_since_unix_epoch(snapshot.created_at_micros),
    }
}
// Inverse of build_ai_text_chunk_row: row -> in-memory chunk snapshot.
// The synthetic row id is dropped; chunk_id is the stable public identifier.
fn build_ai_text_chunk_snapshot_from_row(row: &AiTextChunk) -> AiTextChunkSnapshot {
    AiTextChunkSnapshot {
        chunk_id: row.chunk_id.clone(),
        task_id: row.task_id.clone(),
        stage_kind: row.stage_kind,
        sequence: row.sequence,
        delta_text: row.delta_text.clone(),
        created_at_micros: row.created_at.to_micros_since_unix_epoch(),
    }
}
// Maps a result-reference snapshot to its persisted row; the row id combines
// the prefix, owning task id, and the reference's own id.
fn build_ai_result_reference_row(snapshot: &AiResultReferenceSnapshot) -> AiResultReference {
    AiResultReference {
        result_reference_row_id: format!(
            "{}{}_{}",
            AI_RESULT_REF_ID_PREFIX, snapshot.task_id, snapshot.result_ref_id
        ),
        result_ref_id: snapshot.result_ref_id.clone(),
        task_id: snapshot.task_id.clone(),
        reference_kind: snapshot.reference_kind,
        reference_id: snapshot.reference_id.clone(),
        label: snapshot.label.clone(),
        created_at: Timestamp::from_micros_since_unix_epoch(snapshot.created_at_micros),
    }
}
// Inverse of build_ai_result_reference_row: row -> in-memory snapshot, with
// the synthetic row id dropped.
fn build_ai_result_reference_snapshot_from_row(
    row: &AiResultReference,
) -> AiResultReferenceSnapshot {
    AiResultReferenceSnapshot {
        result_ref_id: row.result_ref_id.clone(),
        task_id: row.task_id.clone(),
        reference_kind: row.reference_kind,
        reference_id: row.reference_id.clone(),
        label: row.label.clone(),
        created_at_micros: row.created_at.to_micros_since_unix_epoch(),
    }
}

View File

@@ -0,0 +1,305 @@
// One confirmed storage object, uniquely located by (bucket, object_key).
#[spacetimedb::table(
    accessor = asset_object,
    index(accessor = by_bucket_object_key, btree(columns = [bucket, object_key]))
)]
pub struct AssetObject {
    #[primary_key]
    asset_object_id: String,
    // Object location is deliberately split into bucket + object_key columns,
    // so a later schema split from a single path string is never needed.
    bucket: String,
    object_key: String,
    access_policy: AssetObjectAccessPolicy,
    content_type: Option<String>,
    content_length: u64,
    content_hash: Option<String>,
    version: u32,
    // Optional provenance / ownership linkage.
    source_job_id: Option<String>,
    owner_user_id: Option<String>,
    profile_id: Option<String>,
    entity_id: Option<String>,
    #[index(btree)]
    asset_kind: String,
    created_at: Timestamp,
    updated_at: Timestamp,
}
// Generic binding of a confirmed asset object to an entity slot, keyed
// logically by (entity_kind, entity_id, slot) for idempotent upserts.
#[spacetimedb::table(
    accessor = asset_entity_binding,
    index(accessor = by_entity_slot, btree(columns = [entity_kind, entity_id, slot])),
    index(accessor = by_asset_object_id, btree(columns = [asset_object_id]))
)]
pub struct AssetEntityBinding {
    #[primary_key]
    binding_id: String,
    asset_object_id: String,
    entity_kind: String,
    entity_id: String,
    slot: String,
    asset_kind: String,
    owner_user_id: Option<String>,
    profile_id: Option<String>,
    created_at: Timestamp,
    updated_at: Timestamp,
}
// Reducer form of the asset-object write rule, kept for internal module reuse.
#[spacetimedb::reducer]
pub fn confirm_asset_object(
    ctx: &ReducerContext,
    input: AssetObjectUpsertInput,
) -> Result<(), String> {
    // The snapshot returned by the upsert helper is intentionally discarded here.
    upsert_asset_object(ctx, input)?;
    Ok(())
}
// Procedure form for the Axum synchronous confirm endpoint: returns the
// persisted record so the HTTP layer never re-reads the private table.
#[spacetimedb::procedure]
pub fn confirm_asset_object_and_return(
    ctx: &mut ProcedureContext,
    input: AssetObjectUpsertInput,
) -> AssetObjectProcedureResult {
    let (record, error_message) = match ctx.try_with_tx(|tx| upsert_asset_object(tx, input.clone()))
    {
        Ok(record) => (Some(record), None),
        Err(message) => (None, Some(message)),
    };
    AssetObjectProcedureResult {
        ok: error_message.is_none(),
        record,
        error_message,
    }
}
// Reducer form of binding a confirmed object to an entity slot; the generic
// binding table carries the relation until strong per-domain asset tables land.
#[spacetimedb::reducer]
pub fn bind_asset_object_to_entity(
    ctx: &ReducerContext,
    input: AssetEntityBindingInput,
) -> Result<(), String> {
    // The binding snapshot is discarded in the reducer form.
    upsert_asset_entity_binding(ctx, input)?;
    Ok(())
}
// Procedure form for the Axum synchronous bind endpoint: returns the final
// binding snapshot so the HTTP layer never reads the private table.
#[spacetimedb::procedure]
pub fn bind_asset_object_to_entity_and_return(
    ctx: &mut ProcedureContext,
    input: AssetEntityBindingInput,
) -> AssetEntityBindingProcedureResult {
    let (record, error_message) =
        match ctx.try_with_tx(|tx| upsert_asset_entity_binding(tx, input.clone())) {
            Ok(record) => (Some(record), None),
            Err(message) => (None, Some(message)),
        };
    AssetEntityBindingProcedureResult {
        ok: error_message.is_none(),
        record,
        error_message,
    }
}
// Idempotent upsert keyed by (bucket, object_key). An existing row keeps its
// asset_object_id and created_at; everything else is overwritten from input.
// A new row takes the caller-supplied asset_object_id and stamps
// created_at = updated_at.
fn upsert_asset_object(
    ctx: &ReducerContext,
    input: AssetObjectUpsertInput,
) -> Result<AssetObjectUpsertSnapshot, String> {
    validate_asset_object_fields(
        &input.bucket,
        &input.object_key,
        &input.asset_kind,
        input.version,
    )
    .map_err(|error| error.to_string())?;
    let updated_at = Timestamp::from_micros_since_unix_epoch(input.updated_at_micros);
    // Minimal publishable implementation: the dedupe semantics are frozen, the
    // full-table scan will be swapped for the composite-index scan later.
    let current = ctx
        .db
        .asset_object()
        .iter()
        .find(|row| row.bucket == input.bucket && row.object_key == input.object_key);
    let snapshot = match current {
        Some(existing) => {
            // Replace-in-place: delete by pk, reinsert with the preserved id/created_at.
            ctx.db
                .asset_object()
                .asset_object_id()
                .delete(&existing.asset_object_id);
            let row = AssetObject {
                asset_object_id: existing.asset_object_id.clone(),
                bucket: input.bucket.clone(),
                object_key: input.object_key.clone(),
                access_policy: input.access_policy,
                content_type: input.content_type.clone(),
                content_length: input.content_length,
                content_hash: input.content_hash.clone(),
                version: input.version,
                source_job_id: input.source_job_id.clone(),
                owner_user_id: input.owner_user_id.clone(),
                profile_id: input.profile_id.clone(),
                entity_id: input.entity_id.clone(),
                asset_kind: input.asset_kind.clone(),
                created_at: existing.created_at,
                updated_at,
            };
            ctx.db.asset_object().insert(row);
            AssetObjectUpsertSnapshot {
                asset_object_id: existing.asset_object_id,
                bucket: input.bucket,
                object_key: input.object_key,
                access_policy: input.access_policy,
                content_type: input.content_type,
                content_length: input.content_length,
                content_hash: input.content_hash,
                version: input.version,
                source_job_id: input.source_job_id,
                owner_user_id: input.owner_user_id,
                profile_id: input.profile_id,
                entity_id: input.entity_id,
                asset_kind: input.asset_kind,
                created_at_micros: existing.created_at.to_micros_since_unix_epoch(),
                updated_at_micros: input.updated_at_micros,
            }
        }
        None => {
            // First confirmation of this (bucket, object_key): created == updated.
            let created_at = updated_at;
            let row = AssetObject {
                asset_object_id: input.asset_object_id.clone(),
                bucket: input.bucket.clone(),
                object_key: input.object_key.clone(),
                access_policy: input.access_policy,
                content_type: input.content_type.clone(),
                content_length: input.content_length,
                content_hash: input.content_hash.clone(),
                version: input.version,
                source_job_id: input.source_job_id.clone(),
                owner_user_id: input.owner_user_id.clone(),
                profile_id: input.profile_id.clone(),
                entity_id: input.entity_id.clone(),
                asset_kind: input.asset_kind.clone(),
                created_at,
                updated_at,
            };
            ctx.db.asset_object().insert(row);
            AssetObjectUpsertSnapshot {
                asset_object_id: input.asset_object_id,
                bucket: input.bucket,
                object_key: input.object_key,
                access_policy: input.access_policy,
                content_type: input.content_type,
                content_length: input.content_length,
                content_hash: input.content_hash,
                version: input.version,
                source_job_id: input.source_job_id,
                owner_user_id: input.owner_user_id,
                profile_id: input.profile_id,
                entity_id: input.entity_id,
                asset_kind: input.asset_kind,
                created_at_micros: input.updated_at_micros,
                updated_at_micros: input.updated_at_micros,
            }
        }
    };
    Ok(snapshot)
}
// Idempotent upsert keyed logically by (entity_kind, entity_id, slot). An
// existing binding keeps its binding_id and created_at; a new one takes the
// caller-supplied binding_id. The referenced asset_object must already exist.
fn upsert_asset_entity_binding(
    ctx: &ReducerContext,
    input: AssetEntityBindingInput,
) -> Result<AssetEntityBindingSnapshot, String> {
    validate_asset_entity_binding_fields(
        &input.binding_id,
        &input.asset_object_id,
        &input.entity_kind,
        &input.entity_id,
        &input.slot,
        &input.asset_kind,
    )
    .map_err(|error| error.to_string())?;
    // Referential check: bindings may only point at confirmed objects.
    if ctx
        .db
        .asset_object()
        .asset_object_id()
        .find(&input.asset_object_id)
        .is_none()
    {
        return Err("asset_entity_binding.asset_object_id 对应的 asset_object 不存在".to_string());
    }
    let updated_at = Timestamp::from_micros_since_unix_epoch(input.updated_at_micros);
    // First version locates idempotently via a full scan on
    // entity_kind + entity_id + slot; swap to the composite index once traffic
    // patterns are clear.
    let current = ctx.db.asset_entity_binding().iter().find(|row| {
        row.entity_kind == input.entity_kind
            && row.entity_id == input.entity_id
            && row.slot == input.slot
    });
    let snapshot = match current {
        Some(existing) => {
            // Replace-in-place, preserving binding_id and created_at.
            ctx.db
                .asset_entity_binding()
                .binding_id()
                .delete(&existing.binding_id);
            let row = AssetEntityBinding {
                binding_id: existing.binding_id.clone(),
                asset_object_id: input.asset_object_id.clone(),
                entity_kind: input.entity_kind.clone(),
                entity_id: input.entity_id.clone(),
                slot: input.slot.clone(),
                asset_kind: input.asset_kind.clone(),
                owner_user_id: input.owner_user_id.clone(),
                profile_id: input.profile_id.clone(),
                created_at: existing.created_at,
                updated_at,
            };
            ctx.db.asset_entity_binding().insert(row);
            AssetEntityBindingSnapshot {
                binding_id: existing.binding_id,
                asset_object_id: input.asset_object_id,
                entity_kind: input.entity_kind,
                entity_id: input.entity_id,
                slot: input.slot,
                asset_kind: input.asset_kind,
                owner_user_id: input.owner_user_id,
                profile_id: input.profile_id,
                created_at_micros: existing.created_at.to_micros_since_unix_epoch(),
                updated_at_micros: input.updated_at_micros,
            }
        }
        None => {
            // First binding for this slot: created == updated.
            let created_at = updated_at;
            let row = AssetEntityBinding {
                binding_id: input.binding_id.clone(),
                asset_object_id: input.asset_object_id.clone(),
                entity_kind: input.entity_kind.clone(),
                entity_id: input.entity_id.clone(),
                slot: input.slot.clone(),
                asset_kind: input.asset_kind.clone(),
                owner_user_id: input.owner_user_id.clone(),
                profile_id: input.profile_id.clone(),
                created_at,
                updated_at,
            };
            ctx.db.asset_entity_binding().insert(row);
            AssetEntityBindingSnapshot {
                binding_id: input.binding_id,
                asset_object_id: input.asset_object_id,
                entity_kind: input.entity_kind,
                entity_id: input.entity_id,
                slot: input.slot,
                asset_kind: input.asset_kind,
                owner_user_id: input.owner_user_id,
                profile_id: input.profile_id,
                created_at_micros: input.updated_at_micros,
                updated_at_micros: input.updated_at_micros,
            }
        }
    };
    Ok(snapshot)
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,29 @@
/// Input for resolving an NPC interaction that takes place inside a battle.
///
/// Wraps the plain NPC interaction payload together with the battle-scoped
/// fields (participant vitals, rewards) so the whole resolution can run in a
/// single reducer call.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct ResolveNpcBattleInteractionInput {
    // The underlying NPC interaction request.
    pub npc_interaction: ResolveNpcInteractionInput,
    pub story_session_id: String,
    pub actor_user_id: String,
    // None when no battle_state row exists yet for this session — TODO confirm against caller.
    pub battle_state_id: Option<String>,
    pub player_hp: i32,
    pub player_max_hp: i32,
    pub player_mana: i32,
    pub player_max_mana: i32,
    pub target_hp: i32,
    pub target_max_hp: i32,
    // Experience granted on resolution.
    pub experience_reward: u32,
    // Item rewards granted on resolution.
    pub reward_items: Vec<RuntimeItemRewardItemSnapshot>,
}
// The output carries both the NPC interaction result and the battle_state
// snapshot, so the Axum layer never has to go back and read the private table.
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct NpcBattleInteractionResult {
    pub interaction: module_npc::NpcInteractionResult,
    pub battle_state: BattleStateSnapshot,
}
/// Procedure-level envelope for the NPC-battle interaction call.
///
/// `ok` signals success; on success `result` is populated, otherwise
/// `error_message` carries the failure reason. (Presumably `result` and
/// `error_message` are mutually exclusive — verify against the reducer that
/// fills this in.)
#[derive(Clone, Debug, PartialEq, Eq, SpacetimeType)]
pub struct NpcBattleInteractionProcedureResult {
    pub ok: bool,
    pub result: Option<NpcBattleInteractionResult>,
    pub error_message: Option<String>,
}

View File

@@ -0,0 +1,23 @@
// Current stage lands only the minimal publishable module entry point; object
// confirmation, business bindings and task-orchestration reducers come later.
/// SpacetimeDB module init hook: runs once when the module is published.
/// It performs no writes — it only logs the fixed configuration constants
/// (ID prefixes, default volume/theme, initial versions) as a deployment
/// fingerprint for operators. The log string itself is runtime output and is
/// intentionally left untranslated.
#[spacetimedb::reducer(init)]
pub fn init(_ctx: &ReducerContext) {
    log::info!(
        "spacetime-module 初始化完成asset_object 已固定 bucket/object_key 双列主存储口径runtime_setting 已固定默认音量={} 和默认主题={}battle_state 前缀={},战斗初始版本={}npc_state 前缀={}npc 招募阈值={}story_session 前缀={}story_event 前缀={}inventory_slot 前缀={}inventory_mutation 前缀={}quest_log 前缀={}treasure_record 前缀={}player_progression 与 chapter_progression 已接入成长真相表M5 custom_world_profile/session/agent/gallery 首批表骨架已接入,默认对象 ID 前缀={},默认绑定 ID 前缀={},资产初始版本={},故事会话初始版本={}",
        DEFAULT_MUSIC_VOLUME,
        DEFAULT_PLATFORM_THEME.as_str(),
        BATTLE_STATE_ID_PREFIX,
        INITIAL_BATTLE_VERSION,
        NPC_STATE_ID_PREFIX,
        NPC_RECRUIT_AFFINITY_THRESHOLD,
        STORY_SESSION_ID_PREFIX,
        STORY_EVENT_ID_PREFIX,
        INVENTORY_SLOT_ID_PREFIX,
        INVENTORY_MUTATION_ID_PREFIX,
        QUEST_LOG_ID_PREFIX,
        TREASURE_RECORD_ID_PREFIX,
        ASSET_OBJECT_ID_PREFIX,
        ASSET_BINDING_ID_PREFIX,
        INITIAL_ASSET_OBJECT_VERSION,
        INITIAL_STORY_SESSION_VERSION
    );
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,80 @@
[CmdletBinding()]
param(
    [Alias("h")]
    [switch]$Help,
    [switch]$RunSmoke,
    [switch]$RunSpacetimeBuild
)

$ErrorActionPreference = "Stop"

# Prints the CLI usage text for this preflight entry point.
function Write-Usage {
    @(
        'Usage:',
        ' ./server-rs/scripts/m7-preflight.ps1',
        ' ./server-rs/scripts/m7-preflight.ps1 -RunSmoke',
        ' ./server-rs/scripts/m7-preflight.ps1 -RunSpacetimeBuild',
        '',
        'Notes:',
        ' 1. Run M7 cutover preflight checks for Rust backend',
        ' 2. Default checks are non-destructive and do not publish or clear SpacetimeDB data',
        ' 3. -RunSmoke starts a temporary api-server and verifies /healthz contract',
        ' 4. -RunSpacetimeBuild requires spacetime CLI and only builds the module'
    ) -join [Environment]::NewLine
}

# Fail fast when the previous native command (cargo / spacetime / child script)
# exited non-zero. BUG FIX: $ErrorActionPreference = "Stop" does NOT abort on
# non-zero exit codes of native executables (Windows PowerShell and pwsh before
# 7.4 with $PSNativeCommandUseErrorActionPreference), so without this guard a
# failed `cargo check` would still end in "all checks passed" — fatal for a
# cutover preflight.
function Assert-LastExitCode {
    param(
        [Parameter(Mandatory = $true)]
        [string]$StepName
    )
    if ($LASTEXITCODE -ne 0) {
        throw "Preflight step failed: $StepName (exit code $LASTEXITCODE)."
    }
}

if ($Help) {
    Write-Usage
    exit 0
}

# Resolve the repo layout relative to this script: scripts/ -> server-rs/ -> repo root.
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
$serverRsDir = Split-Path -Parent $scriptDir
$repoRoot = Split-Path -Parent $serverRsDir
$manifestPath = Join-Path $serverRsDir "Cargo.toml"
$modulePath = Join-Path $serverRsDir "crates\spacetime-module"

if (-not (Test-Path $manifestPath)) {
    throw "Missing server-rs/Cargo.toml, cannot start M7 preflight."
}

Write-Host "[m7:preflight] repo root: $repoRoot"
Write-Host "[m7:preflight] server-rs: $serverRsDir"

Push-Location $serverRsDir
try {
    Write-Host "[m7:preflight] step: cargo check -p spacetime-module"
    cargo check -p spacetime-module --manifest-path $manifestPath
    Assert-LastExitCode "cargo check -p spacetime-module"

    Write-Host "[m7:preflight] step: cargo check -p api-server"
    cargo check -p api-server --manifest-path $manifestPath
    Assert-LastExitCode "cargo check -p api-server"

    Write-Host "[m7:preflight] step: cargo test -p shared-contracts"
    cargo test -p shared-contracts --manifest-path $manifestPath
    Assert-LastExitCode "cargo test -p shared-contracts"

    if ($RunSpacetimeBuild) {
        $spacetimeCommand = Get-Command spacetime -ErrorAction SilentlyContinue
        if ($null -eq $spacetimeCommand) {
            throw "Missing spacetime CLI, cannot run spacetime build."
        }
        Write-Host "[m7:preflight] step: spacetime build --debug"
        Push-Location $modulePath
        try {
            & $spacetimeCommand.Source build --debug
            Assert-LastExitCode "spacetime build --debug"
        }
        finally {
            Pop-Location
        }
    }
}
finally {
    Pop-Location
}

if ($RunSmoke) {
    Write-Host "[m7:preflight] step: server-rs smoke"
    & (Join-Path $serverRsDir "scripts\smoke.ps1")
    # NOTE(review): $LASTEXITCODE after a .ps1 call reflects the script's last
    # native command / explicit exit; good enough to catch smoke failures here.
    Assert-LastExitCode "server-rs smoke"
}

Write-Host "[m7:preflight] all checks passed"