fix: use char-based slicing instead of byte-based to handle UTF-8
Byte index slicing like `&text[..100.min(text.len())]` panics when the byte index falls inside a multi-byte UTF-8 character (e.g., Chinese). Changed to `text.chars().take(100).collect::<String>()` for safe character-based truncation.
This commit is contained in:
parent
075b92f231
commit
a4399037ac
@ -160,7 +160,7 @@ impl LLMProvider for OpenAIProvider {
|
||||
for (j, item) in content.iter().enumerate() {
|
||||
if item.get("type").and_then(|t| t.as_str()) == Some("image_url") {
|
||||
if let Some(url_str) = item.get("image_url").and_then(|u| u.get("url")).and_then(|v| v.as_str()) {
|
||||
let prefix = &url_str[..20.min(url_str.len())];
|
||||
let prefix: String = url_str.chars().take(20).collect();
|
||||
tracing::debug!(msg_idx = i, item_idx = j, image_prefix = %prefix, image_url_len = %url_str.len(), "Image in LLM request (first 20 chars shown)");
|
||||
}
|
||||
}
|
||||
@ -188,8 +188,8 @@ impl LLMProvider for OpenAIProvider {
|
||||
// Debug: Log LLM response (only in debug builds)
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
let resp_preview = &text[..text.len().min(100)];
|
||||
tracing::debug!(status = %status, response_preview = %resp_preview, response_len = %text.len(), "LLM response (first 100 bytes shown)");
|
||||
let resp_preview: String = text.chars().take(100).collect();
|
||||
tracing::debug!(status = %status, response_preview = %resp_preview, response_len = %text.len(), "LLM response (first 100 chars shown)");
|
||||
}
|
||||
|
||||
if !status.is_success() {
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user