//! GPT-5 Responses-protocol agent client built on `platform_llm`.
use platform_llm::{
    LlmClient, LlmMessage, LlmMessageContentPart, LlmMessageRole, LlmTextProtocol, LlmTextRequest,
    LlmTextResponse,
};

use crate::error::PlatformAgentError;
/// Model identifier sent with every creative-agent request (see
/// [`build_gpt5_multimodal_request`]).
pub const CREATIVE_AGENT_GPT5_MODEL: &str = "gpt-5";
/// Thin wrapper around [`LlmClient`] that issues GPT-5 requests over the
/// Responses protocol.
///
/// `Clone` is derived so the client can be shared cheaply across tasks
/// (assuming `LlmClient`'s own `Clone` is cheap — it is a project type,
/// so confirm against its definition).
#[derive(Clone)]
pub struct Gpt5ResponsesAgentClient {
    // Underlying transport used by `request`.
    llm_client: LlmClient,
}
impl Gpt5ResponsesAgentClient {
|
|
pub fn new(llm_client: LlmClient) -> Self {
|
|
Self { llm_client }
|
|
}
|
|
|
|
pub async fn request(
|
|
&self,
|
|
system_prompt: impl Into<String>,
|
|
user_text: impl Into<String>,
|
|
image_urls: Vec<String>,
|
|
) -> Result<LlmTextResponse, PlatformAgentError> {
|
|
let request = build_gpt5_multimodal_request(system_prompt, user_text, image_urls);
|
|
self.llm_client
|
|
.request_text(request)
|
|
.await
|
|
.map_err(Into::into)
|
|
}
|
|
}
pub fn build_gpt5_multimodal_request(
|
|
system_prompt: impl Into<String>,
|
|
user_text: impl Into<String>,
|
|
image_urls: Vec<String>,
|
|
) -> LlmTextRequest {
|
|
let mut user_parts = vec![LlmMessageContentPart::InputText {
|
|
text: user_text.into(),
|
|
}];
|
|
user_parts.extend(
|
|
image_urls
|
|
.into_iter()
|
|
.map(|image_url| LlmMessageContentPart::InputImage { image_url }),
|
|
);
|
|
|
|
LlmTextRequest {
|
|
model: Some(CREATIVE_AGENT_GPT5_MODEL.to_string()),
|
|
messages: vec![
|
|
LlmMessage::new(LlmMessageRole::System, system_prompt.into()),
|
|
LlmMessage::multimodal(LlmMessageRole::User, user_parts),
|
|
],
|
|
max_tokens: None,
|
|
request_timeout_ms: None,
|
|
enable_web_search: false,
|
|
protocol: LlmTextProtocol::Responses,
|
|
}
|
|
}