feat: migrate runtime backend to node server
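
Server runtime (no `window`) now reads configuration from `process.env`
(`LLM_API_KEY`/`ARK_API_KEY`, `LLM_BASE_URL`, `LLM_MODEL`, `LLM_DEBUG_LOG`,
`LLM_REQUEST_TIMEOUT_MS`, `LLM_CUSTOM_WORLD_TIMEOUT_MS`) and calls the
upstream endpoint directly with a Bearer token; the browser keeps using the
Vite env and the authenticated `/api/llm` proxy.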
@@ -1,10 +1,66 @@
 import type {TextStreamOptions} from './aiTypes';
+import {fetchWithApiAuth} from './apiClient';
 
 const ENV: Partial<ImportMetaEnv> = import.meta.env ?? {};
 
-const API_BASE_URL = ENV.VITE_LLM_PROXY_BASE_URL || '/api/llm';
-const MODEL = ENV.VITE_LLM_MODEL || 'doubao-1-5-pro-32k-character-250715';
-const ENABLE_LLM_DEBUG_LOG = Boolean(ENV.DEV) || ENV.VITE_LLM_DEBUG_LOG === 'true';
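+// Minimal structural view of Node's `process`; only the `env` bag is needed here.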
+type NodeProcessLike = {
+  env?: Record<string, string | undefined>;
+};
+
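+// Returns `process.env` under Node; in the browser (where `window` exists)
+// it returns an empty object so every lookup falls through to its default.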
+function getNodeEnv() {
+  if (typeof window !== 'undefined') {
+    return {};
+  }
+
+  return (
+    (globalThis as typeof globalThis & {process?: NodeProcessLike}).process?.env
+      ?? {}
+  );
+}
+
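+// Trim trailing slashes so `${API_BASE_URL}/chat/completions` never doubles a '/'.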
+function normalizeBaseUrl(value: string) {
+  return value.replace(/\/+$/u, '');
+}
+
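+// Only the literal string 'true' (trimmed, case-insensitive) counts as true.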
+function coerceBoolean(value: string | undefined) {
+  return value?.trim().toLowerCase() === 'true';
+}
+
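+// Flatten any HeadersInit shape (Headers, entry array, or plain object) into
+// a mutable record so an Authorization header can be layered on below.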
+function resolveHeaders(headers?: HeadersInit) {
+  const nextHeaders: Record<string, string> = {};
+
+  if (headers instanceof Headers) {
+    headers.forEach((value, key) => {
+      nextHeaders[key] = value;
+    });
+  } else if (Array.isArray(headers)) {
+    for (const [key, value] of headers) {
+      nextHeaders[key] = value;
+    }
+  } else if (headers) {
+    Object.assign(nextHeaders, headers);
+  }
+
+  return nextHeaders;
+}
+
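+// Server/browser split: with no `window`, read configuration from process.env
+// (LLM_* first, VITE_LLM_* as fallback); otherwise keep using the
+// Vite-injected import.meta.env values.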
+const NODE_ENV = getNodeEnv();
+const IS_SERVER_RUNTIME = typeof window === 'undefined';
+const SERVER_API_KEY =
+  NODE_ENV.LLM_API_KEY || NODE_ENV.ARK_API_KEY || NODE_ENV.VITE_LLM_API_KEY || '';
+const API_BASE_URL = IS_SERVER_RUNTIME
+  ? normalizeBaseUrl(
+      NODE_ENV.LLM_BASE_URL || 'https://ark.cn-beijing.volces.com/api/v3',
+    )
+  : (ENV.VITE_LLM_PROXY_BASE_URL || '/api/llm');
+const MODEL = IS_SERVER_RUNTIME
+  ? (NODE_ENV.LLM_MODEL
+      || NODE_ENV.VITE_LLM_MODEL
+      || 'doubao-1-5-pro-32k-character-250715')
+  : (ENV.VITE_LLM_MODEL || 'doubao-1-5-pro-32k-character-250715');
+const ENABLE_LLM_DEBUG_LOG = IS_SERVER_RUNTIME
+  ? coerceBoolean(NODE_ENV.LLM_DEBUG_LOG)
+  : (Boolean(ENV.DEV) || ENV.VITE_LLM_DEBUG_LOG === 'true');
 
 export interface PlainTextCompletionOptions {
   timeoutMs?: number;
@@ -31,9 +87,16 @@ export function resolveTimeoutMs(rawValue: string | undefined, fallback: number)
   return Number.isFinite(parsed) && parsed > 0 ? Math.round(parsed) : fallback;
 }
 
-export const REQUEST_TIMEOUT_MS = resolveTimeoutMs(ENV.VITE_LLM_REQUEST_TIMEOUT_MS, 15000);
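+// Timeout overrides follow the same precedence: LLM_* on the server,
+// VITE_LLM_* (via import.meta.env) in the browser.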
+export const REQUEST_TIMEOUT_MS = resolveTimeoutMs(
+  IS_SERVER_RUNTIME
+    ? (NODE_ENV.LLM_REQUEST_TIMEOUT_MS || NODE_ENV.VITE_LLM_REQUEST_TIMEOUT_MS)
+    : ENV.VITE_LLM_REQUEST_TIMEOUT_MS,
+  15000,
+);
 export const CUSTOM_WORLD_REQUEST_TIMEOUT_MS = resolveTimeoutMs(
-  ENV.VITE_LLM_CUSTOM_WORLD_TIMEOUT_MS,
+  IS_SERVER_RUNTIME
+    ? (NODE_ENV.LLM_CUSTOM_WORLD_TIMEOUT_MS || NODE_ENV.VITE_LLM_CUSTOM_WORLD_TIMEOUT_MS)
+    : ENV.VITE_LLM_CUSTOM_WORLD_TIMEOUT_MS,
   Math.max(REQUEST_TIMEOUT_MS, 120000),
 );
 
@@ -57,6 +120,22 @@ function normalizeLlmError(error: unknown): never {
   throw error;
 }
 
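+// Single entry point for LLM HTTP calls: the server hits the upstream API
+// directly (attaching a Bearer key when configured); the browser keeps going
+// through the authenticated proxy client.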
+function requestLlmEndpoint(input: string, init: RequestInit = {}) {
+  const headers = resolveHeaders(init.headers);
+  if (IS_SERVER_RUNTIME && SERVER_API_KEY.trim()) {
+    headers.Authorization = `Bearer ${SERVER_API_KEY.trim()}`;
+  }
+
+  const nextInit = {
+    ...init,
+    headers,
+  } satisfies RequestInit;
+
+  return IS_SERVER_RUNTIME
+    ? fetch(input, nextInit)
+    : fetchWithApiAuth(input, nextInit);
+}
+
 export function isLlmConnectivityError(error: unknown): error is LlmConnectivityError {
   return error instanceof LlmConnectivityError;
 }
@@ -99,7 +178,7 @@ async function requestMessageContent(
   try {
     logLlmDebug(`[LLM:${debugLabel}] prompt text`, rawPromptText);
 
-    const response = await fetch(`${API_BASE_URL}/chat/completions`, {
+    const response = await requestLlmEndpoint(`${API_BASE_URL}/chat/completions`, {
       method: 'POST',
       headers: {'Content-Type': 'application/json'},
       body: JSON.stringify(requestBody),
@@ -175,7 +254,7 @@ export async function streamPlainTextCompletion(
   const timeout = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);
 
   try {
-    const response = await fetch(`${API_BASE_URL}/chat/completions`, {
+    const response = await requestLlmEndpoint(`${API_BASE_URL}/chat/completions`, {
       method: 'POST',
       headers: {'Content-Type': 'application/json'},
       body: JSON.stringify({