Results 1 - 6 of 6 for streamChat (0.09 seconds)
src/main/webapp/js/chat.js
// Add thinking indicator
var thinkingId = addThinkingIndicator();
// Use SSE for streaming
streamChat(message, thinkingId);
}

/**
 * Stream chat using Server-Sent Events
 */
function streamChat(message, thinkingId) {
    var url = config.streamUrl + '?message=' + encodeURIComponent(message);
    if (state.sessionId) {

Created: Tue Mar 31 13:07:34 GMT 2026 - Last Modified: Thu Mar 19 01:36:02 GMT 2026 - 30.6K bytes - Click Count (0)
src/main/java/org/codelibs/fess/llm/LlmClient.java
 * @param request the chat request containing messages and parameters
 * @param callback the callback to receive streaming chunks
 * @throws LlmException if an error occurs during the request
 */
void streamChat(LlmChatRequest request, LlmStreamCallback callback);

/**
 * Returns the name of this LLM client.
 *
 * @return the client name (e.g., "ollama", "openai", "gemini")
 */
String getName();

Created: Tue Mar 31 13:07:34 GMT 2026 - Last Modified: Thu Mar 19 07:04:54 GMT 2026 - 7.3K bytes - Click Count (0)
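The excerpt above names LlmStreamCallback but none of these results show its methods, so the callback shape below (onChunk, onComplete, onError) is an assumption, and CannedLlmClient plus the stand-in LlmChatRequest are made up for illustration. A minimal, self-contained sketch of the callback-driven contract:

    // Self-contained sketch: every type here is a simplified stand-in for the real
    // Fess class of the same name; LlmStreamCallback's methods are assumed names,
    // not the confirmed API.
    interface LlmStreamCallback {
        void onChunk(String chunk);   // assumed: one call per streamed token/segment
        void onComplete();            // assumed: stream finished normally
        void onError(Throwable t);    // assumed: streaming failed
    }

    class LlmChatRequest { }          // stand-in; the real one carries messages and parameters

    // A trivial client honoring the streamChat(request, callback) contract shown above.
    class CannedLlmClient {

        public void streamChat(LlmChatRequest request, LlmStreamCallback callback) {
            try {
                for (String word : "canned answer for testing".split(" ")) {
                    callback.onChunk(word);   // push each piece as soon as it is available
                }
                callback.onComplete();
            } catch (RuntimeException e) {
                callback.onError(e);
            }
        }

        public String getName() {
            return "canned";              // cf. "ollama", "openai", "gemini" in the Javadoc
        }
    }

The point of the callback style is that callers can render partial output as it arrives (as chat.js does over SSE) instead of waiting for the full completion.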
src/main/java/org/codelibs/fess/llm/LlmClientManager.java
 *
 * @param request the chat request
 * @param callback the callback to receive streaming chunks
 * @throws LlmException if LLM is not available or an error occurs
 */
public void streamChat(final LlmChatRequest request, final LlmStreamCallback callback) {
    final long startTime = System.currentTimeMillis();
    final String llmType = getLlmType();
    if (logger.isDebugEnabled()) {

Created: Tue Mar 31 13:07:34 GMT 2026 - Last Modified: Thu Mar 19 11:10:51 GMT 2026 - 17.4K bytes - Click Count (0)
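LlmClientManager.streamChat above is the entry point callers would use; how a manager instance is obtained (presumably via Fess's component container) is not shown in these results. A usage sketch that reuses the assumed callback shape from the previous sketch and waits defensively in case the call does not block:

    // Usage sketch only: LlmClientManager and LlmChatRequest are the classes from the
    // results above, while LlmStreamCallback's methods are the assumed ones from the
    // previous sketch.
    import java.util.concurrent.CountDownLatch;

    public final class StreamChatExample {

        // Collects streamed chunks into a single string.
        public static String ask(final LlmClientManager manager, final LlmChatRequest request)
                throws InterruptedException {
            final StringBuilder answer = new StringBuilder();
            final CountDownLatch done = new CountDownLatch(1);

            manager.streamChat(request, new LlmStreamCallback() {
                @Override public void onChunk(final String chunk) { answer.append(chunk); } // assumed method
                @Override public void onComplete() { done.countDown(); }                    // assumed method
                @Override public void onError(final Throwable t) { done.countDown(); }      // assumed method
            });

            // Harmless if streamChat already blocked until the stream finished.
            done.await();
            return answer.toString();
        }
    }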
src/main/java/org/codelibs/fess/llm/AbstractLlmClient.java
 *
 * Provides shared infrastructure (HTTP client, availability checking) and
 * default implementations of RAG workflow methods with injectable prompt templates.
 * Subclasses implement provider-specific chat/streamChat and checkAvailabilityNow.
 */
public abstract class AbstractLlmClient implements LlmClient {

    private static final Logger logger = LogManager.getLogger(AbstractLlmClient.class);

Created: Tue Mar 31 13:07:34 GMT 2026 - Last Modified: Sat Mar 21 06:04:58 GMT 2026 - 72K bytes - Click Count (0)
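The AbstractLlmClient Javadoc says subclasses supply provider-specific chat/streamChat and checkAvailabilityNow, but the abstract signatures for chat and checkAvailabilityNow are not visible here. A sketch of the subclassing pattern under stated assumptions (ExampleProviderClient is a made-up provider, and the boolean and LlmChatResponse return types are guesses, not the real contract):

    // Pattern sketch only; compiles against assumed signatures, not the real ones.
    public class ExampleProviderClient extends AbstractLlmClient {

        @Override
        public String getName() {
            return "example-provider";   // hypothetical provider id
        }

        @Override
        protected boolean checkAvailabilityNow() {
            // assumed signature: ping the provider's endpoint via the shared HTTP client
            return true;
        }

        @Override
        public LlmChatResponse chat(LlmChatRequest request) {
            // assumed signature: one-shot (non-streaming) completion;
            // the String constructor matches the test excerpt in the next result
            return new LlmChatResponse("stub answer");
        }

        @Override
        public void streamChat(LlmChatRequest request, LlmStreamCallback callback) {
            // provider-specific streaming (e.g., reading an SSE or chunked HTTP response)
            // would go here; omitted in this sketch
        }
    }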
src/test/java/org/codelibs/fess/llm/AbstractLlmClientTest.java
        throw new LlmException("Test: no response configured");
    }
    return new LlmChatResponse(chatResponseContent);
}

@Override
public void streamChat(final LlmChatRequest request, final LlmStreamCallback callback) {
    if (streamChatCapture != null) {
        streamChatCapture.capture(request, callback);
    } else {

Created: Tue Mar 31 13:07:34 GMT 2026 - Last Modified: Thu Mar 19 07:04:54 GMT 2026 - 53K bytes - Click Count (0)
docs_src/server_sent_events/tutorial005_py310.py
from pydantic import BaseModel

app = FastAPI()

class Prompt(BaseModel):
    text: str

@app.post("/chat/stream", response_class=EventSourceResponse)
async def stream_chat(prompt: Prompt) -> AsyncIterable[ServerSentEvent]:
    words = prompt.text.split()
    for word in words:
        yield ServerSentEvent(data=word, event="token")
Created: Sun Apr 05 07:19:11 GMT 2026 - Last Modified: Sun Mar 01 09:21:52 GMT 2026 - 528 bytes - Click Count (0)