diff --git a/backend/ollama_client.py b/backend/ollama_client.py
index 281ab15..75878ed 100644
--- a/backend/ollama_client.py
+++ b/backend/ollama_client.py
@@ -1,4 +1,3 @@
-import asyncio
 import httpx
 import json
 import re
@@ -147,7 +146,7 @@ async def list_models() -> Dict[str, Any]:
     return {"models": models}
 
 
-async def list_model_catalog(*, refresh: bool = False) -> Dict[str, Any]:
+async def list_model_catalog() -> Dict[str, Any]:
     ollama_url = get_ollama_api_url()
     async with httpx.AsyncClient(timeout=30.0) as client:
         r = await client.get(f"{ollama_url}/api/tags")
@@ -158,7 +157,7 @@ async def list_model_catalog(*, refresh: bool = False) -> Dict[str, Any]:
     models = [
         _build_model_catalog_entry(
             item or {},
-            show_model(str((item or {}).get("name") or "").strip(), refresh=refresh) if False else _get_cached_model_details(str((item or {}).get("name") or "").strip()),
+            _get_cached_model_details(str((item or {}).get("name") or "").strip()),
         )
         for item in raw_models
         if str((item or {}).get("name") or "").strip()
diff --git a/src/GeneralSettings.jsx b/src/GeneralSettings.jsx
index cf7674a..1d66f81 100644
--- a/src/GeneralSettings.jsx
+++ b/src/GeneralSettings.jsx
@@ -35,13 +35,13 @@ function resolveBackendApiUrl(settings) {
   return settings.backendApiUrl || settings.ollamaApiUrl || DEFAULT_BACKEND_API_URL;
 }
 
-function buildSelectOptions(values, currentValue, missingLabel) {
+function buildSelectOptions(values, currentValue, missingLabel, showMissingLabel = true) {
   const uniqueValues = [...new Set((Array.isArray(values) ? values : []).filter(Boolean))];
   const options = uniqueValues.map(value => ({ value, label: value }));
   if (currentValue && !uniqueValues.includes(currentValue)) {
     options.unshift({
       value: currentValue,
-      label: `${currentValue} (${missingLabel})`,
+      label: showMissingLabel ? `${currentValue} (${missingLabel})` : currentValue,
     });
   }
   return options;
 }
@@ -93,6 +93,7 @@ export default function GeneralSettings({
   const [isPurgingLibraries, setIsPurgingLibraries] = useState(false);
   const [libraryPurgeStatus, setLibraryPurgeStatus] = useState({ tone: 'neutral', message: '' });
   const [settingsHydrated, setSettingsHydrated] = useState(false);
+  const [isLoadingModelCatalog, setIsLoadingModelCatalog] = useState(false);
   const audioInputSupported = supportsAudioInputCapture();
 
   useEffect(() => {
@@ -138,18 +139,40 @@ export default function GeneralSettings({
   }, []);
 
   useEffect(() => {
-    if (backendApiUrl) {
-      fetch(backendApiUrl + '/models')
-        .then(r => r.json())
-        .then(data => {
-          setChatModels(Array.isArray(data.chat_models) ? data.chat_models : []);
-          setEmbeddingModels(Array.isArray(data.embedding_models) ? data.embedding_models : []);
-          setVisionModels(Array.isArray(data.vision_models) ? data.vision_models : []);
-          setRerankingModels(Array.isArray(data.reranking_models) ? data.reranking_models : []);
-          setWhisperModels(Array.isArray(data.whisper_models) ? data.whisper_models.map(model => model.name).filter(Boolean) : []);
-        })
-        .catch(err => console.error('Failed to load models', err));
+    if (!backendApiUrl) {
+      setIsLoadingModelCatalog(false);
+      return () => {};
     }
+
+    let cancelled = false;
+    setIsLoadingModelCatalog(true);
+
+    fetch(backendApiUrl + '/models')
+      .then(r => r.json())
+      .then(data => {
+        if (cancelled) {
+          return;
+        }
+        setChatModels(Array.isArray(data.chat_models) ? data.chat_models : []);
+        setEmbeddingModels(Array.isArray(data.embedding_models) ? data.embedding_models : []);
+        setVisionModels(Array.isArray(data.vision_models) ? data.vision_models : []);
+        setRerankingModels(Array.isArray(data.reranking_models) ? data.reranking_models : []);
+        setWhisperModels(Array.isArray(data.whisper_models) ? data.whisper_models.map(model => model.name).filter(Boolean) : []);
+      })
+      .catch(err => {
+        if (!cancelled) {
+          console.error('Failed to load models', err);
+        }
+      })
+      .finally(() => {
+        if (!cancelled) {
+          setIsLoadingModelCatalog(false);
+        }
+      });
+
+    return () => {
+      cancelled = true;
+    };
   }, [backendApiUrl, ollamaApiUrl]);
 
   useEffect(() => {
@@ -500,11 +523,12 @@ export default function GeneralSettings({
   const audioDeviceRefreshLabel = audioInputDevices.some(device => device.hasLabel)
     ? 'Refresh devices'
     : 'Allow microphone access';
 
-  const chatModelOptions = buildSelectOptions(chatModels, selectedModel, 'saved model unavailable');
-  const visionModelOptions = buildSelectOptions(visionModels, visionModel, 'saved model unavailable');
-  const embeddingModelOptions = buildSelectOptions(embeddingModels, embedModel, 'saved model unavailable');
-  const rerankingModelOptions = buildSelectOptions(rerankingModels, rerankModel, 'saved model unavailable');
-  const transcriptionModelOptions = buildSelectOptions(whisperModels, transcriptionModel, 'saved model unavailable');
+  const showMissingModelLabel = !isLoadingModelCatalog;
+  const chatModelOptions = buildSelectOptions(chatModels, selectedModel, 'saved model unavailable', showMissingModelLabel);
+  const visionModelOptions = buildSelectOptions(visionModels, visionModel, 'saved model unavailable', showMissingModelLabel);
+  const embeddingModelOptions = buildSelectOptions(embeddingModels, embedModel, 'saved model unavailable', showMissingModelLabel);
+  const rerankingModelOptions = buildSelectOptions(rerankingModels, rerankModel, 'saved model unavailable', showMissingModelLabel);
+  const transcriptionModelOptions = buildSelectOptions(whisperModels, transcriptionModel, 'saved model unavailable', showMissingModelLabel);
 
   return (
@@ -659,7 +683,7 @@ export default function GeneralSettings({
               value={visionModel}
               onChange={handleVisionModelChange}
             >
-              {visionModelOptions.length === 0 && }
+              {visionModelOptions.length === 0 && }
               {visionModelOptions.map(model => )}