Files
Heimgeist/src/App.jsx

963 lines
36 KiB
React
Raw Normal View History

feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
// /Users/giers/Heimgeist/src/App.jsx
import React, { useEffect, useLayoutEffect, useMemo, useRef, useState } from 'react'
import { flushSync } from 'react-dom';
2025-08-22 23:42:34 +02:00
import TextareaAutosize from 'react-textarea-autosize';
import GeneralSettings from './GeneralSettings'
import InterfaceSettings from './InterfaceSettings'
import { markdownToHTML } from './markdown';
// Extract <think> or <thinking> block (first occurrence) and return { think, answer }
function splitThinkBlocks(text) {
if (!text) return { think: null, answer: '' };
const openTagRe = /<think(?:ing)?>/i;
const closeTagRe = /<\/think(?:ing)?>/i;
const openMatch = text.match(openTagRe);
if (!openMatch) {
// No opening <think> tag found, so all content is answer
return { think: null, answer: text };
}
const openTagIndex = openMatch.index;
const openTagLength = openMatch[0].length;
const answerPartBeforeThink = text.substring(0, openTagIndex).trim();
let contentAfterOpenTag = text.substring(openTagIndex + openTagLength);
const closeMatch = contentAfterOpenTag.match(closeTagRe);
let thinkInner = null;
let finalAnswer = answerPartBeforeThink;
if (closeMatch) {
// Both open and close tags are present
thinkInner = contentAfterOpenTag.substring(0, closeMatch.index).trim();
finalAnswer += contentAfterOpenTag.substring(closeMatch.index + closeMatch[0].length);
} else {
// Only open tag found (streaming case), take everything after it as think
thinkInner = contentAfterOpenTag.trim();
}
return { think: thinkInner || null, answer: finalAnswer.trim() };
}
// Renders assistant message with a collapsible "Thoughts" block (if present)
function AssistantMessageContent({ content, streamOutput }) {
const { think, answer } = splitThinkBlocks(content || '');
const [open, setOpen] = React.useState(false); // Closed by default
// If streaming, the button should appear as soon as 'think' content is detected
const showThinkButton = !!think;
return (
<div className="assistant-message">
{showThinkButton && (
<div className="assistant-thoughts">
<button
className="think-toggle"
onClick={() => setOpen(o => !o)}
aria-expanded={open ? 'true' : 'false'}
aria-controls="think-content"
>
<span className="think-toggle-icon" aria-hidden="true">{open ? '▾' : '▸'}</span>
Thoughts
</button>
{open && (
<div id="think-content" className="think-content" dangerouslySetInnerHTML={{ __html: markdownToHTML(think) }} />
)}
</div>
)}
<div className="msg-content" dangerouslySetInnerHTML={{ __html: markdownToHTML(answer || content || '') }} />
</div>
);
}
2025-08-22 23:42:34 +02:00
const API_URL_KEY = 'ollamaApiUrl';
const COLOR_SCHEME_KEY = 'colorScheme';
// Initial API value will be set by useEffect after settings are loaded
let API = import.meta.env.VITE_API_URL ?? 'http://127.0.0.1:8000';
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
const TOP_ALIGN_OFFSET = 48; // match .chat padding + header height for exact top alignment (should be more dynamic depending on header height)
const BOTTOM_EPSILON = 24; // px tolerance for treating as bottom
2025-08-22 23:42:34 +02:00
export default function App() {
const [chatSessions, setChatSessions] = useState([])
const [activeSessionId, setActiveSessionId] = useState(null)
const [activeSidebarMode, setActiveSidebarMode] = useState('chats') // 'chats', 'dbs', 'settings'
const [activeSettingsSubmenu, setActiveSettingsSubmenu] = useState('General'); // 'General', 'Interface'
const [editingSessionId, setEditingSessionId] = useState(null); // ID of the session being edited
// Use currentSessionId for the actual chat operations
const [model, setModel] = useState('')
const [input, setInput] = useState('')
const chatRef = useRef(null)
const textareaRef = useRef(null); // Ref for the textarea
const [ollamaApiUrl, setOllamaApiUrl] = useState(API); // State for Ollama API URL
const [colorScheme, setColorScheme] = useState('Default'); // State for color scheme
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
const [streamOutput, setStreamOutput] = useState(false);
const [isSending, setIsSending] = useState(false);
2025-08-22 23:42:34 +02:00
const [loading, setLoading] = useState(true); // Loading state for initial session fetch
const [unreadSessions, setUnreadSessions] = useState([]); // Track unread messages
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
const [scrollPositions, setScrollPositions] = useState({}); // Store scroll positions for each session
// Persist userScrolledUp state per session + live ref for closures (streaming)
const [userScrolledUpState, setUserScrolledUpState] = useState({});
const userScrolledUpRef = useRef({});
// When a response arrives in a non-active chat, remember to scroll to the new ASSISTANT message on open
const [pendingScrollToLastUser, setPendingScrollToLastUser] = useState({}); // { [sessionId]: assistantMsgId }
// Live per-session scrollTop tracker to avoid races
const scrollTopsRef = useRef({});
// Live per-session previous scrollTop tracker to detect scroll direction
const prevScrollTopsRef = useRef({});
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
// Tip state: { [sessionId]: messageId }
const [newMsgTip, setNewMsgTip] = useState({});
const setUserScrolledUp = React.useCallback((sessionId, value) => {
setUserScrolledUpState(prev => {
const next = { ...prev, [sessionId]: value };
userScrolledUpRef.current = next;
return next;
});
}, []);
const activeRequestSessionId = useRef(null);
const justSentMessage = useRef(false);
const lastSentSessionRef = useRef(null);
2025-08-22 23:42:34 +02:00
const activeSessionIdRef = useRef(activeSessionId);
useEffect(() => {
activeSessionIdRef.current = activeSessionId;
}, [activeSessionId]);
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
// Flag to ensure we only restore once per open of a chat
const restoredForRef = useRef(null);
2025-08-22 23:42:34 +02:00
// Sidebar resizing state
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
const [sidebarWidth, setSidebarWidth] = useState(230);
2025-08-22 23:42:34 +02:00
const [isResizing, setIsResizing] = useState(false);
const startResizing = React.useCallback((mouseDownEvent) => {
setIsResizing(true);
}, []);
const stopResizing = React.useCallback(() => {
setIsResizing(false);
}, []);
const resizeSidebar = React.useCallback((mouseMoveEvent) => {
if (isResizing) {
const newWidth = Math.max(230, Math.min(500, mouseMoveEvent.clientX));
setSidebarWidth(newWidth);
}
}, [isResizing]);
React.useEffect(() => {
window.addEventListener('mousemove', resizeSidebar);
window.addEventListener('mouseup', stopResizing);
return () => {
window.removeEventListener('mousemove', resizeSidebar);
window.removeEventListener('mouseup', stopResizing);
};
}, [resizeSidebar, stopResizing]);
React.useEffect(() => {
if (isResizing) {
document.body.classList.add('no-select');
} else {
document.body.classList.remove('no-select');
}
}, [isResizing]);
// Load settings on startup
useEffect(() => {
window.electronAPI.getSettings().then(settings => {
setOllamaApiUrl(settings.ollamaApiUrl);
setColorScheme(settings.colorScheme);
setModel(settings.chatModel || ''); // Load the selected model, with a fallback
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
setStreamOutput(settings.streamOutput || false);
setScrollPositions(settings.scrollPositions || {}); // Load scroll positions
2025-08-22 23:42:34 +02:00
applyColorScheme(settings.colorScheme); // Apply initial scheme
});
const handleFocus = () => {
if (activeSidebarMode === 'chats') {
textareaRef.current?.focus();
}
};
window.electronAPI.onWindowFocus(handleFocus);
return () => {
// Clean up the listener when the component unmounts
// This part is tricky with the current setup, as `onWindowFocus` uses `ipcRenderer.on`
// which doesn't return a cleanup function. A more robust implementation
// would involve `ipcRenderer.removeListener`. For now, we'll assume this is okay
// for the lifetime of the app.
};
}, [activeSidebarMode]);
2025-08-22 23:42:34 +02:00
// Apply color scheme whenever it changes
useEffect(() => {
applyColorScheme(colorScheme);
}, [colorScheme]);
// Function to apply color scheme
const colorSchemes = {
};
function applyColorScheme(schemeName) {
const scheme = colorSchemes[schemeName];
if (scheme) {
for (const [key, value] of Object.entries(scheme)) {
document.documentElement.style.setProperty(key, value);
}
}
}
const fetchHistory = (sessionId) => {
if (!sessionId || !ollamaApiUrl) return;
fetch(`${ollamaApiUrl}/history?session_id=${encodeURIComponent(sessionId)}`)
.then(r => r.json())
.then(data => {
setChatSessions(prevSessions =>
prevSessions.map(session =>
session.session_id === sessionId
? { ...session, messages: data.messages || [] }
: session
)
);
})
.catch(() => {});
};
// Load chat sessions from backend on initial render
useEffect(() => {
if (!ollamaApiUrl) return;
setLoading(true);
fetch(`${ollamaApiUrl}/sessions`)
.then(r => r.json())
.then(data => {
const sessionsWithMessages = data.sessions.map(s => ({ ...s, messages: [] }));
setChatSessions(sessionsWithMessages);
if (sessionsWithMessages.length > 0) {
setActiveSessionId(sessionsWithMessages[0].session_id);
} else {
setActiveSessionId(null);
}
setLoading(false);
})
.catch(() => {
setLoading(false);
});
}, [ollamaApiUrl]);
// Load messages for the active session
useEffect(() => {
fetchHistory(activeSessionId);
}, [activeSessionId]);
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
const handleSidebarClick = (mode) => {
// Saving happens in the centralized cleanup effect below
setActiveSidebarMode(mode);
};
const handleSelectChat = (sessionId) => {
// Saving happens in the centralized cleanup effect below
selectChat(sessionId);
};
2025-08-22 23:42:34 +02:00
const messages = useMemo(() => {
return chatSessions.find(s => s.session_id === activeSessionId)?.messages || [];
}, [activeSessionId, chatSessions]);
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
// Persist the scrollTop of the session we are LEAVING (on chat change or when leaving the chat view)
2025-08-22 23:42:34 +02:00
useEffect(() => {
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
const leavingSessionId = activeSessionId;
const leavingMode = activeSidebarMode;
2025-08-22 23:42:34 +02:00
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
return () => {
if (leavingMode === 'chats' && leavingSessionId) {
const top = typeof scrollTopsRef.current[leavingSessionId] === 'number'
? scrollTopsRef.current[leavingSessionId]
: (chatRef.current ? chatRef.current.scrollTop : 0);
setScrollPositions(prev => {
const updated = { ...prev, [leavingSessionId]: top };
window.electronAPI.updateSettings({ scrollPositions: updated });
return updated;
});
}
};
}, [activeSessionId, activeSidebarMode]);
// Track scroll position and whether the user has left the bottom of the chat.
// Persists per-session scrollTop into scrollTopsRef and toggles the
// per-session "user scrolled up" flag that gates autoscroll.
// (Interleaved VCS junk lines removed from this span.)
useEffect(() => {
  const chatDiv = chatRef.current;
  if (!chatDiv) return;
  const handleScroll = () => {
    const { scrollTop, scrollHeight, clientHeight } = chatDiv;
    const isAtBottom = (scrollHeight - scrollTop - clientHeight) <= BOTTOM_EPSILON;
    if (activeSessionId) {
      const prevScrollTop = prevScrollTopsRef.current[activeSessionId];
      const scrolledUp = typeof prevScrollTop === 'number' && scrollTop < prevScrollTop;
      scrollTopsRef.current[activeSessionId] = scrollTop;
      if (isAtBottom) {
        setUserScrolledUp(activeSessionId, false); // user is at bottom -> re-enable autoscroll
      } else if (scrolledUp) {
        setUserScrolledUp(activeSessionId, true); // user moved up -> disable autoscroll
      }
      // Scrolling down without reaching the bottom keeps the current flag unchanged.
      prevScrollTopsRef.current[activeSessionId] = scrollTop;
    }
  };
  chatDiv.addEventListener('scroll', handleScroll);
  return () => chatDiv.removeEventListener('scroll', handleScroll);
}, [activeSessionId, setUserScrolledUp]);
// Auto-hide the "new message" tip once the user has returned to the bottom of
// the active chat (its scrolled-up flag is explicitly false, not undefined).
useEffect(() => {
  const sid = activeSessionId;
  if (!sid || userScrolledUpState[sid] !== false) return;
  setNewMsgTip(prev => {
    if (!(sid in prev)) return prev; // nothing to clear; keep same reference
    const { [sid]: _removed, ...rest } = prev;
    return rest;
  });
}, [activeSessionId, userScrolledUpState]);
// --- Robust restoration: do it before paint, exactly once per open ---
// When switching into a chat, restore its saved scrollTop (preferring the live
// in-memory value over the persisted scrollPositions snapshot) before the first
// paint; with no saved position and existing content, default to the bottom.
// Mutation/Resize observers retry once if the DOM settles after first paint.
useLayoutEffect(() => {
if (activeSidebarMode !== 'chats' || !activeSessionId) return;
const div = chatRef.current;
if (!div) return;
restoredForRef.current = null;
// Applies the restore at most once per open (guarded by restoredForRef).
const applyRestore = () => {
if (restoredForRef.current === activeSessionId) return;
const liveSaved = typeof scrollTopsRef.current[activeSessionId] === 'number'
? scrollTopsRef.current[activeSessionId]
: undefined;
const saved = typeof liveSaved === 'number'
? liveSaved
: scrollPositions[activeSessionId];
if (typeof saved === 'number') {
div.scrollTop = saved;
restoredForRef.current = activeSessionId;
return;
}
if (messages.length > 0) {
// default: bottom when no saved position
div.scrollTop = div.scrollHeight;
restoredForRef.current = activeSessionId;
}
};
// Run immediately (pre-paint) and also schedule a fallback rAF
applyRestore();
const r0 = requestAnimationFrame(applyRestore);
// If content size/DOM changes after first paint, apply once
const onDomChange = () => {
if (restoredForRef.current !== activeSessionId) {
requestAnimationFrame(applyRestore);
}
};
const mo = new MutationObserver(onDomChange);
mo.observe(div, { childList: true, subtree: true });
const ro = new ResizeObserver(onDomChange);
ro.observe(div);
// Cleanup: cancel the pending rAF and stop observing on close/switch.
return () => {
cancelAnimationFrame(r0);
mo.disconnect();
ro.disconnect();
};
}, [activeSessionId, activeSidebarMode, messages.length, scrollPositions]);
// If there is no saved scroll and content arrives later (e.g., on first app
// load), default to bottom exactly once for this open chat.
useEffect(() => {
  const sid = activeSessionId;
  if (activeSidebarMode !== 'chats' || !sid) return;
  if (restoredForRef.current === sid) return; // restore already applied
  // Prefer the live in-memory value; fall back to the persisted snapshot.
  const live = scrollTopsRef.current[sid];
  const saved = typeof live === 'number' ? live : scrollPositions[sid];
  // Only act when nothing was saved and content is now present.
  if (typeof saved === 'number' || messages.length === 0) return;
  requestAnimationFrame(() => {
    const div = chatRef.current;
    if (!div) return;
    div.scrollTop = div.scrollHeight;
    restoredForRef.current = sid;
  });
}, [messages.length, activeSessionId, activeSidebarMode, scrollPositions]);
// Session-aware scroll helper: scroll the chat pane to its bottom, but only
// when the target session (defaulting to the current one) is on screen.
// Also clears the per-session "scrolled up" flag so autoscroll resumes.
const scrollToBottom = (behavior = 'smooth', sessionId = null) => {
  const pane = chatRef.current;
  const target = sessionId ?? activeSessionIdRef.current;
  if (!pane || activeSessionIdRef.current !== target) return;
  pane.scrollTo({ top: pane.scrollHeight, behavior });
  setUserScrolledUp(target, false);
};
// Scroll so that the message with DOM id `msgId` sits near the top of the chat
// pane, offset by TOP_ALIGN_OFFSET (clamped at 0). No-op unless the target
// session is the one currently on screen, or if the element is not rendered.
// (Stray VCS junk lines removed from this span.)
const scrollMessageToTop = (msgId, behavior = 'auto', sessionId = null) => {
  const chatDiv = chatRef.current;
  if (!chatDiv) return;
  const target = sessionId ?? activeSessionIdRef.current;
  if (activeSessionIdRef.current !== target) return;
  const el = document.getElementById(msgId);
  if (el) {
    const top = Math.max(0, el.offsetTop - TOP_ALIGN_OFFSET);
    chatDiv.scrollTo({ top, behavior });
  }
};
// Click handler for the "new message" tip: jump to the flagged reply in the
// active session, then clear that session's tip entry.
const handleNewMsgTipClick = () => {
  const sid = activeSessionIdRef.current;
  const msgId = newMsgTip[sid];
  if (!msgId) return;
  scrollMessageToTop(msgId, 'smooth', sid);
  setNewMsgTip(prev => {
    const next = { ...prev };
    delete next[sid];
    return next;
  });
};
async function sendMessage() {
2025-08-22 23:42:34 +02:00
if (!input.trim() || !model) return;
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
let targetSessionId = activeSessionId;
let isNewChat = false;
2025-08-22 23:42:34 +02:00
if (!targetSessionId) {
const newSession = await createNewChat();
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
await new Promise(resolve => setTimeout(resolve, 200));
2025-08-22 23:42:34 +02:00
targetSessionId = newSession.session_id;
isNewChat = true;
} else {
const currentSession = chatSessions.find(s => s.session_id === targetSessionId);
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
isNewChat = currentSession && currentSession.name === "New Chat" && currentSession.messages.length === 0;
2025-08-22 23:42:34 +02:00
}
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
const userMsg = { role: 'user', content: input.trim(), id: `msg-${Date.now()}-${Math.random()}` };
justSentMessage.current = true;
lastSentSessionRef.current = targetSessionId;
setUserScrolledUp(targetSessionId, false);
// Cancel any pending restore for the active session (we're about to control the scroll)
if (activeSessionIdRef.current === targetSessionId) {
restoredForRef.current = activeSessionIdRef.current; // mark as already restored
}
2025-08-22 23:42:34 +02:00
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
// Optimistic add and flush DOM, then scroll to bottom
flushSync(() => {
2025-08-22 23:42:34 +02:00
setChatSessions(prevSessions =>
prevSessions.map(session =>
session.session_id === targetSessionId
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
? { ...session, messages: [...(session.messages || []), userMsg] }
2025-08-22 23:42:34 +02:00
: session
)
);
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
setInput('');
});
requestAnimationFrame(() => scrollToBottom('auto', targetSessionId));
setIsSending(true);
try {
if (streamOutput) {
  // Streaming path: append an empty assistant message, then fill it in as
  // chunks arrive. Runs in a detached async IIFE so the outer function is not
  // blocked; isSending is cleared in the IIFE's finally block.
  const assistantMsgId = `msg-${Date.now()}-${Math.random()}`;
  const assistantMsg = { role: 'assistant', content: '', id: assistantMsgId };
  setChatSessions(prevSessions =>
    prevSessions.map(session =>
      session.session_id === targetSessionId
        ? { ...session, messages: [...(session.messages || []), assistantMsg] }
        : session
    )
  );
  (async () => {
    try {
      const res = await fetch(`${ollamaApiUrl}/chat`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          session_id: targetSessionId,
          model,
          message: userMsg.content,
          stream: true
        })
      });
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      let fullReply = '';
      let pendingMarked = false;
      // Write `content` into the placeholder assistant message of the target
      // session (shared by the per-chunk and final updates).
      const updateAssistant = (content) => {
        setChatSessions(prevSessions =>
          prevSessions.map(session =>
            session.session_id === targetSessionId
              ? {
                  ...session,
                  messages: session.messages.map(m =>
                    m.id === assistantMsgId ? { ...m, content } : m
                  )
                }
              : session
          )
        );
      };
      while (true) {
        const { value, done } = await reader.read();
        if (done) {
          // Flush any buffered multi-byte sequence still held by the decoder,
          // then commit the final reply.
          fullReply += decoder.decode();
          updateAssistant(fullReply);
          if (activeSessionIdRef.current === targetSessionId) {
            if (!userScrolledUpRef.current[targetSessionId]) {
              // user stayed at bottom -> reveal the message immediately
              requestAnimationFrame(() => scrollMessageToTop(assistantMsgId, 'smooth', targetSessionId));
            } else {
              // user scrolled away while it was generating -> show tip instead of auto-scroll
              setNewMsgTip(prev => ({ ...prev, [targetSessionId]: assistantMsgId }));
            }
          } else {
            setPendingScrollToLastUser(prev => ({ ...prev, [targetSessionId]: assistantMsgId }));
            setUnreadSessions(prev => [...new Set([...prev, targetSessionId])]);
          }
          break;
        }
        fullReply += decoder.decode(value, { stream: true });
        updateAssistant(fullReply);
        // Keep sticky-bottom *only* when streaming in the active chat and user
        // is at/near bottom; never fight an explicit user scroll.
        if (
          activeSessionIdRef.current === targetSessionId &&
          !userScrolledUpRef.current[targetSessionId]
        ) {
          scrollToBottom('auto', targetSessionId); // 'auto' keeps it snappy
        }
        // If streaming in a background chat, prepare a one-time guided scroll
        if (activeSessionIdRef.current !== targetSessionId && !pendingMarked) {
          setPendingScrollToLastUser(prev => ({ ...prev, [targetSessionId]: assistantMsgId }));
          pendingMarked = true;
        }
      }
    } catch (e) {
      console.error("Failed to send message:", e);
      // Replace the (last) placeholder assistant message with an error message.
      const errorMsg = { role: 'assistant', content: 'Error: ' + e.message, id: `msg-${Date.now()}-${Math.random()}` };
      setChatSessions(prevSessions =>
        prevSessions.map(session =>
          session.session_id === targetSessionId
            ? { ...session, messages: [...session.messages.slice(0, -1), errorMsg] }
            : session
        )
      );
    } finally {
      setIsSending(false);
    }
  })();
} else {
  // Non-streaming path: one request, one full reply in the JSON body.
  const res = await fetch(`${ollamaApiUrl}/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      session_id: targetSessionId,
      model,
      message: userMsg.content,
      stream: false
    })
  });
  const data = await res.json();
  const assistantMsgId = `msg-${Date.now()}`;
  const assistantMsg = { role: 'assistant', content: data.reply, id: assistantMsgId };
  setChatSessions(prevSessions =>
    prevSessions.map(session =>
      session.session_id === targetSessionId
        ? { ...session, messages: [...(session.messages || []), assistantMsg] }
        : session
    )
  );
  // Align the new ASSISTANT message to the top, unless the user scrolled away.
  if (activeSessionIdRef.current === targetSessionId) {
    if (!userScrolledUpRef.current[targetSessionId]) {
      requestAnimationFrame(() => scrollMessageToTop(assistantMsgId, 'smooth', targetSessionId));
    } else {
      // Show the tip if the user scrolled away while waiting.
      setNewMsgTip(prev => ({ ...prev, [targetSessionId]: assistantMsgId }));
    }
  } else {
    setPendingScrollToLastUser(prev => ({ ...prev, [targetSessionId]: assistantMsgId }));
  }
  setIsSending(false);
}
2025-08-22 23:42:34 +02:00
if (activeSessionIdRef.current !== targetSessionId) {
setUnreadSessions(prev => [...new Set([...prev, targetSessionId])]);
}
if (isNewChat) {
fetch(`${ollamaApiUrl}/generate-title`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
session_id: targetSessionId,
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
message: userMsg.content,
model: model
2025-08-22 23:42:34 +02:00
})
})
.then(r => r.json())
.then(data => {
const sanitizedTitle = data.title.replace(/<think(?:ing)?>[\s\S]*?<\/think(?:ing)?>/i, '').trim();
setChatSessions(prevSessions =>
prevSessions.map(session =>
session.session_id === targetSessionId ? { ...session, name: sanitizedTitle } : session
)
);
});
}
} catch (e) {
console.error("Failed to send message:", e);
const errorMsg = { role: 'assistant', content: 'Error: ' + e.message, id: `msg-${Date.now()}-${Math.random()}` };
setChatSessions(prevSessions =>
prevSessions.map(session =>
session.session_id === targetSessionId
? { ...session, messages: [...session.messages, errorMsg] }
: session
)
);
setIsSending(false);
}
}
/**
 * Create a fresh chat session on the backend, prepend it to the local
 * session list, make it the active chat, and focus the message input.
 * @returns {Promise<object>} The session object returned by the backend.
 * @throws {Error} If the backend responds with a non-2xx status.
 */
async function createNewChat() {
  // Client-generated id: random base36 suffix + timestamp for uniqueness.
  const newSessionId = 'sess-' + Math.random().toString(36).slice(2) + Date.now().toString(36);
  const res = await fetch(`${ollamaApiUrl}/sessions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ session_id: newSessionId })
  });
  if (!res.ok) {
    // Fail loudly instead of trying to parse an error page as JSON below.
    throw new Error(`Failed to create session (HTTP ${res.status})`);
  }
  const newSession = await res.json();
  // The backend response carries no history; seed an empty message list.
  const sessionWithMessages = { ...newSession, messages: [] };
  setChatSessions(prevSessions => [sessionWithMessages, ...prevSessions]);
  setActiveSessionId(newSession.session_id);
  textareaRef.current?.focus();
  return newSession;
}
/**
 * Switch the UI to the given chat session: activate it, clear its unread
 * marker, and — if a guided scroll was queued while the chat was in the
 * background — scroll smoothly to the newest assistant reply.
 * @param {string} sessionId - Session to activate.
 */
function selectChat(sessionId) {
  setActiveSessionId(sessionId);
  // Clear unread dot immediately for this chat
  setUnreadSessions(prev => prev.filter(id => id !== sessionId));
  // If we had queued a guided scroll for this chat (from background replies), run it now, smoothly
  const pendingId = pendingScrollToLastUser[sessionId];
  if (pendingId) {
    // Defer until the chat content renders; restoration is gated by restoredForRef, so won't fight
    requestAnimationFrame(() => {
      let tries = 12; // ~200ms @ 60fps
      const attempt = () => {
        const chatDiv = chatRef.current;
        if (!chatDiv) return;
        let el = document.getElementById(pendingId);
        if (!el) {
          // The queued id may be gone; fall back to the newest assistant
          // message that actually has a DOM id.
          const sess = chatSessions.find(s => s.session_id === sessionId);
          if (sess && Array.isArray(sess.messages)) {
            for (let i = sess.messages.length - 1; i >= 0; i--) {
              const m = sess.messages[i];
              if (m.role === 'assistant' && m.id) { el = document.getElementById(m.id); break; }
            }
          }
        }
        if (el) {
          scrollMessageToTop(el.id, 'smooth', sessionId);
          // Drop the fulfilled entry so the scroll doesn't re-run later.
          setPendingScrollToLastUser(prev => {
            const { [sessionId]: _omit, ...rest } = prev;
            return rest;
          });
        } else if (tries-- > 0) {
          // Target not rendered yet; retry on the next frame.
          requestAnimationFrame(attempt);
        }
      };
      requestAnimationFrame(attempt);
    });
  }
}
/**
 * Persist a new session title to the backend, then mirror it in local state
 * and close the inline rename editor.
 * @param {string} sessionId - Session to rename.
 * @param {string} newName - Title entered by the user.
 */
function handleRename(sessionId, newName) {
  fetch(`${ollamaApiUrl}/sessions/${sessionId}/rename`, {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ title: newName })
  })
    .then(() => {
      setChatSessions(prevSessions =>
        prevSessions.map(session =>
          session.session_id === sessionId ? { ...session, name: newName } : session
        )
      );
      setEditingSessionId(null);
    })
    // Previously a failed rename rejected silently (unhandled promise).
    // Log it and leave the edit field open so the user can retry.
    .catch(err => console.error('Failed to rename session:', err));
}
/**
 * Delete a chat session on the backend, then remove it from local state.
 * Uses a functional state update so several near-simultaneous deletes
 * (e.g. the empty-"New Chat" cleanup effect calls this in a loop) don't
 * clobber each other through a stale `chatSessions` closure.
 * @param {string} sessionId - Session to delete.
 */
function handleDelete(sessionId) {
  fetch(`${ollamaApiUrl}/sessions/${sessionId}`, { method: 'DELETE' })
    .then(() => {
      setChatSessions(prev => {
        const remaining = prev.filter(s => s.session_id !== sessionId);
        // If the active chat was just deleted, fall back to the first
        // remaining session (or none). Scheduling this here keeps the
        // decision based on the freshest session list rather than the
        // render-time snapshot the original code read.
        setActiveSessionId(curr =>
          curr === sessionId ? (remaining[0]?.session_id ?? null) : curr
        );
        return remaining;
      });
    })
    // Surface backend failures instead of leaving an unhandled rejection.
    .catch(err => console.error('Failed to delete session:', err));
}
// Housekeeping: whenever the session list or active chat changes, delete
// any untouched "New Chat" sessions that the user has navigated away from.
useEffect(() => {
  for (const session of chatSessions) {
    const isAbandonedNewChat =
      session.name === "New Chat" &&
      session.session_id !== activeSessionId &&
      session.messages.length === 0;
    if (isAbandonedNewChat) {
      handleDelete(session.session_id);
    }
  }
}, [activeSessionId, chatSessions, ollamaApiUrl]);
// Clicking empty chat-frame space focuses the input — but not while the
// user is selecting text, already typing, or clicking inside a message.
const handleChatFrameClick = (e) => {
  const hasTextSelection = window.getSelection().toString().length > 0;
  const alreadyTyping = document.activeElement === textareaRef.current;
  const clickedMessage = Boolean(e.target.closest('.msg'));
  if (hasTextSelection || alreadyTyping || clickedMessage) {
    return;
  }
  textareaRef.current?.focus();
};
return (
<div className="app" style={{ gridTemplateColumns: `${sidebarWidth}px 1fr` }}>
<div className="sidebar">
<div className="sidebar-header">
<div
className={`sidebar-tab ${activeSidebarMode === 'chats' ? 'active' : ''}`}
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
onClick={() => handleSidebarClick('chats')}
2025-08-22 23:42:34 +02:00
>
Chats
</div>
<div
className={`sidebar-tab ${activeSidebarMode === 'dbs' ? 'active' : ''}`}
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
onClick={() => handleSidebarClick('dbs')}
2025-08-22 23:42:34 +02:00
>
DBs
</div>
<div
className={`sidebar-tab ${activeSidebarMode === 'settings' ? 'active' : ''}`}
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
onClick={() => handleSidebarClick('settings')}
2025-08-22 23:42:34 +02:00
>
Settings
</div>
</div>
<div className="sidebar-content">
{activeSidebarMode === 'chats' && (
<div className="chat-list">
{chatSessions.map(session => (
<div
key={session.session_id}
className={`chat-item ${session.session_id === activeSessionId ? 'active' : ''}`}
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
onClick={() => handleSelectChat(session.session_id)}
2025-08-22 23:42:34 +02:00
>
{editingSessionId === session.session_id ? (
<input
type="text"
className="rename-input"
defaultValue={session.name}
onBlur={() => setEditingSessionId(null)}
onKeyDown={(e) => {
if (e.key === 'Enter') {
handleRename(session.session_id, e.target.value);
} else if (e.key === 'Escape') {
setEditingSessionId(null);
}
}}
autoFocus
/>
) : (
<>
<span>{session.name}</span>
<div className="chat-item-buttons">
{unreadSessions.includes(session.session_id) && <div className="unread-dot"></div>}
<button className="icon-button" onClick={(e) => { e.stopPropagation(); setEditingSessionId(session.session_id); }}>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" className="feather feather-edit-2"><path d="M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"></path></svg>
</button>
<button className="icon-button" onClick={(e) => { e.stopPropagation(); handleDelete(session.session_id); }}>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" className="feather feather-x"><line x1="18" y1="6" x2="6" y2="18"></line><line x1="6" y1="6" x2="18" y2="18"></line></svg>
</button>
</div>
</>
)}
</div>
))}
</div>
)}
{activeSidebarMode === 'dbs' && (
<div className="db-list">
<div className="empty-list-message">No databases yet.</div>
</div>
)}
{activeSidebarMode === 'settings' && (
<div className="settings-list">
<div
className={`settings-item ${activeSettingsSubmenu === 'General' ? 'active' : ''}`}
onClick={() => setActiveSettingsSubmenu('General')}
>
General
</div>
<div
className={`settings-item ${activeSettingsSubmenu === 'Interface' ? 'active' : ''}`}
onClick={() => setActiveSettingsSubmenu('Interface')}
>
Interface
</div>
</div>
)}
</div>
{activeSidebarMode !== 'settings' && (
<div className="sidebar-footer">
{activeSidebarMode === 'chats' && (
<button className="button new-chat-button" onClick={createNewChat}>New Chat</button>
)}
{activeSidebarMode === 'dbs' && (
<button className="button new-db-button" onClick={() => {}}>New Database</button>
)}
</div>
)}
<div className="resizer" onMouseDown={startResizing}></div>
</div>
<div className="main-content">
{activeSidebarMode === 'chats' && (
<>
<div className="header">
<strong>Chat - {chatSessions.find(s => s.session_id === activeSessionId)?.name || 'New Chat'}</strong>
</div>
<div key={activeSessionId} className="chat" ref={chatRef} onClick={handleChatFrameClick}>
2025-08-22 23:42:34 +02:00
{messages.map((m, i) => (
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
<div key={m.id || i} id={m.id} className={'msg ' + (m.role === 'user' ? 'user' : 'assistant')}>
{m.role === 'assistant'
? <AssistantMessageContent content={m.content} streamOutput={streamOutput} />
: <div className="msg-content" dangerouslySetInnerHTML={{ __html: markdownToHTML(m.content) }} />
}
2025-08-22 23:42:34 +02:00
</div>
))}
</div>
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
{/* New message tip (active chat only) */}
{newMsgTip[activeSessionId] && (
<button
className="new-msg-tip"
onClick={handleNewMsgTipClick}
title="Jump to the new message"
aria-label="Jump to the new message"
>
New message<span style={{ marginLeft: 6 }}></span>
</button>
)}
2025-08-22 23:42:34 +02:00
<div className="footer">
<div className="footer-content-wrapper">
<TextareaAutosize
ref={textareaRef}
2025-08-22 23:42:34 +02:00
className="input"
value={input}
onChange={e => setInput(e.target.value)}
onKeyDown={e => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
sendMessage();
}
}}
placeholder="Ask any question..."
maxRows={13}
/>
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
<button className="button" onClick={sendMessage} disabled={isSending}>
{isSending ? <div className="spinner"></div> : 'Send'}
</button>
2025-08-22 23:42:34 +02:00
</div>
</div>
</>
)}
{activeSidebarMode === 'dbs' && (
<div className="placeholder-view">
<h1>Databases</h1>
<p>This is a placeholder for the database management view.</p>
</div>
)}
{activeSidebarMode === 'settings' && (
<>
<div className="header">
<strong>{activeSettingsSubmenu} Settings</strong>
</div>
feat: Add streaming chat + scroll persistence; improve markdown & links Backend - /chat: support streaming via StreamingResponse; save full reply after stream ends. Non-stream path unchanged. - ChatRequest: add stream flag (default false). - GenerateTitleRequest: add model and use it instead of hardcoded llama3. - ollama_client.chat_stream(): new async generator parsing Ollama streaming JSON (both formats). - Remove response_model from /chat to allow streaming; non-stream still returns { reply }. Electron - Open external links in system browser (setWindowOpenHandler, shell.openExternal). - New IPC: update-settings, open-external-link. - Set minimum window size; preload exposes updateSettings and openExternalLink. Frontend (React) - Streaming UI with live chunking; sticky-bottom only when user at bottom. - Per-session scroll persistence and robust restore. - New message tip to jump to latest reply when scrolled up. - Disable Send while sending; spinner. - General Settings: stream output toggle; propagate model/stream changes. - Apply color scheme at boot; extract colorSchemes helper. - Sidebar UX tweaks and unread badges. Markdown/rendering - Code blocks: language title bar and wrapper. - Tables: GitHub-style parsing, per-cell borders, rounded wrapper, spacing, alignment. - Headings: remove blank line after h1-h4. - <hr>: handle after tables; strip following whitespace. - Links: target=_blank with icon and URL tooltip. Styles - Add styles for code/table wrappers, new-message tip, toggle, spinner; hover/active vars; narrower sidebar. API notes / breaking changes - /chat accepts stream=true and returns text/plain streamed chunks. - generate-title now requires a model. - Non-stream /chat response shape unchanged.
2025-08-23 16:45:46 +02:00
{activeSettingsSubmenu === 'General' && (
<GeneralSettings
onModelChange={setModel}
streamOutput={streamOutput}
onStreamOutputChange={setStreamOutput}
/>
)}
2025-08-22 23:42:34 +02:00
{activeSettingsSubmenu === 'Interface' && <InterfaceSettings />}
</>
)}
</div>
</div>
)
}