Update Match3D/image-generation docs & code

Adds/updates documentation, assets and implementation for Match3D and puzzle image generation workflows. Key changes: decision logs and pitfalls updated to prefer VectorEngine Gemini for Match3D material sheets and to require edits (multipart) for 1:1 container reference images; guidance added for when to use APIMart vs VectorEngine. .env.example clarified APIMart/Responses config. Many new public assets and PPT visuals added. Code changes across frontend and backend: updated shared contracts, server-rs match3d/puzzle/image-generation handlers, VectorEngine/OpenAI image generation clients, and multiple React components/tests to handle UI/background/container image signing, edits workflow, and puzzle UI background resolution. Added src/services/puzzle-runtime/puzzleUiBackgroundSource.ts and related test updates. Includes notes about multipart HTTP/1.1 requirement and test/verification commands in docs.
This commit is contained in:
2026-05-14 20:34:45 +08:00
parent d33c937ebc
commit 548db78ca7
103 changed files with 6687 additions and 3270 deletions

View File

@@ -32,6 +32,13 @@ pub(crate) struct DownloadedOpenAiImage {
pub extension: String,
}
#[derive(Clone, Debug)]
/// Caller-supplied reference image forwarded to the VectorEngine
/// `/v1/images/edits` multipart request (image-to-image editing).
pub(crate) struct OpenAiReferenceImage {
    /// Raw image bytes; uploaded as the multipart `image` part.
    pub bytes: Vec<u8>,
    /// MIME type attached to the multipart part (e.g. `image/png`);
    /// an invalid value makes part construction fail before any request is sent.
    pub mime_type: String,
    /// File name attached to the multipart part's `Content-Disposition`.
    pub file_name: String,
}
// 中文注释:RPG、方洞等图片资产统一走 VectorEngine GPT-image-2-all,避免把密钥或供应商协议暴露到前端。
pub(crate) fn require_openai_image_settings(
state: &AppState,
@@ -75,6 +82,8 @@ pub(crate) fn build_openai_image_http_client(
) -> Result<reqwest::Client, AppError> {
reqwest::Client::builder()
.timeout(Duration::from_millis(settings.request_timeout_ms))
// 中文注释:同一客户端也会承载 `/v1/images/edits` multipart 图生图请求,强制 HTTP/1.1 可避开部分网关对 HTTP/2 multipart 流的兼容问题。
.http1_only()
.build()
.map_err(|error| {
AppError::from_status(StatusCode::INTERNAL_SERVER_ERROR).with_details(json!({
@@ -157,6 +166,82 @@ pub(crate) async fn create_openai_image_generation(
)
}
/// Runs an image-to-image edit against VectorEngine's `/v1/images/edits`
/// endpoint and normalizes the result into [`OpenAiGeneratedImages`].
///
/// The reference image is uploaded as a multipart `image` part; the response
/// may carry either downloadable URLs or inline base64 payloads, both of
/// which are handled here. Returns a `BAD_GATEWAY` error when the provider
/// returns neither form.
pub(crate) async fn create_openai_image_edit(
    http_client: &reqwest::Client,
    settings: &OpenAiImageSettings,
    prompt: &str,
    negative_prompt: Option<&str>,
    size: &str,
    reference_image: &OpenAiReferenceImage,
    failure_context: &str,
) -> Result<OpenAiGeneratedImages, AppError> {
    // Synthetic task id used to name downloaded / decoded artifacts.
    let task_id = format!("vector-engine-edit-{}", current_utc_micros());

    // Build the multipart `image` part; an invalid MIME string fails here,
    // before any network traffic.
    let reference_part = reqwest::multipart::Part::bytes(reference_image.bytes.clone())
        .file_name(reference_image.file_name.clone())
        .mime_str(reference_image.mime_type.as_str())
        .map_err(|error| {
            map_openai_image_request_error(format!("{failure_context}:构造参考图失败:{error}"))
        })?;

    let multipart_form = reqwest::multipart::Form::new()
        .part("image", reference_part)
        .text("model", GPT_IMAGE_2_MODEL.to_string())
        .text("prompt", build_prompt_with_negative(prompt, negative_prompt))
        .text("n", "1")
        .text("size", normalize_image_size(size));

    let upstream = http_client
        .post(vector_engine_images_edit_url(settings).as_str())
        .header(
            header::AUTHORIZATION,
            format!("Bearer {}", settings.api_key),
        )
        .header(header::ACCEPT, "application/json")
        .multipart(multipart_form)
        .send()
        .await
        .map_err(|error| {
            map_openai_image_request_error(format!(
                "{failure_context}:创建图片编辑任务失败:{error}"
            ))
        })?;

    // Capture status before consuming the body so both can feed error mapping.
    let status = upstream.status();
    let body = upstream.text().await.map_err(|error| {
        map_openai_image_request_error(format!("{failure_context}:读取图片编辑响应失败:{error}"))
    })?;
    if !status.is_success() {
        return Err(map_openai_image_upstream_error(
            status.as_u16(),
            body.as_str(),
            failure_context,
        ));
    }

    let parsed = parse_json_payload(body.as_str(), failure_context)?;
    // Prefer the provider's `revised_prompt`; fall back to `actual_prompt`.
    let actual_prompt = find_first_string_by_key(&parsed.payload, "revised_prompt")
        .or_else(|| find_first_string_by_key(&parsed.payload, "actual_prompt"));

    // URL-shaped results take priority over inline base64 payloads.
    let urls = extract_image_urls(&parsed.payload);
    if !urls.is_empty() {
        let mut generated = download_images_from_urls(http_client, task_id, urls, 1).await?;
        generated.actual_prompt = actual_prompt;
        return Ok(generated);
    }

    let inline_images = extract_b64_images(&parsed.payload);
    if !inline_images.is_empty() {
        let mut generated = images_from_base64(task_id, inline_images, 1);
        generated.actual_prompt = actual_prompt;
        return Ok(generated);
    }

    // Provider responded 2xx but supplied neither URLs nor base64 images.
    Err(AppError::from_status(StatusCode::BAD_GATEWAY).with_details(json!({
        "provider": VECTOR_ENGINE_PROVIDER,
        "message": format!("{failure_context}VectorEngine 未返回编辑图片"),
    })))
}
pub(crate) fn build_openai_image_request_body(
prompt: &str,
negative_prompt: Option<&str>,
@@ -453,6 +538,14 @@ fn vector_engine_images_generation_url(settings: &OpenAiImageSettings) -> String
}
}
/// Builds the VectorEngine `/v1/images/edits` endpoint URL from the
/// configured base URL.
///
/// Accepts both bare hosts (`https://host`) and bases already carrying the
/// `/v1` suffix. Trailing slashes are stripped first so a configured
/// `https://host/v1/` no longer yields the malformed
/// `https://host/v1//v1/images/edits`.
fn vector_engine_images_edit_url(settings: &OpenAiImageSettings) -> String {
    let base = settings.base_url.trim_end_matches('/');
    if base.ends_with("/v1") {
        format!("{base}/images/edits")
    } else {
        format!("{base}/v1/images/edits")
    }
}
fn normalize_downloaded_image_mime_type(content_type: &str) -> String {
let mime_type = content_type
.split(';')
@@ -530,6 +623,29 @@ mod tests {
assert!(body["prompt"].as_str().unwrap_or_default().contains("避免"));
}
#[test]
fn vector_engine_edit_url_uses_images_edits_endpoint() {
    // Both a bare host and a base already ending in `/v1` must resolve to
    // the exact same `/v1/images/edits` endpoint.
    for base_url in ["https://vector.example", "https://vector.example/v1"] {
        let settings = OpenAiImageSettings {
            base_url: base_url.to_string(),
            api_key: "test-key".to_string(),
            request_timeout_ms: 180_000,
        };
        assert_eq!(
            vector_engine_images_edit_url(&settings),
            "https://vector.example/v1/images/edits"
        );
    }
}
#[test]
fn b64_json_response_decodes_png_image() {
let images = images_from_base64(