From 17792d39a0d00c4b5d64e78feac8ab103ac17857 Mon Sep 17 00:00:00 2001
From: southseact-3d
Date: Wed, 11 Feb 2026 10:53:37 +0000
Subject: [PATCH] feat: dynamic provider model loading from models.dev

- Add fetchModelsDevProviderModels() function with 1-hour caching
- Update ensureOpencodeConfig() to dynamically load all models for configured providers
- Support Chutes, Cerebras, Groq, Google, and Nvidia providers
- Only fetch models for providers actually used in adminModels
- Cache models.dev data to reduce API calls and improve performance

This allows any model from models.dev to work when added in the admin panel,
instead of requiring hardcoded model definitions.
---
 chat/server.js | 141 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 116 insertions(+), 25 deletions(-)

diff --git a/chat/server.js b/chat/server.js
index 1d3a1c0..5d9d6ab 100644
--- a/chat/server.js
+++ b/chat/server.js
@@ -1527,6 +1527,10 @@ function stopAutoSave() {
 }
 let cachedModels = new Map();
 let cachedModelsAt = new Map();
+// Cache for models.dev provider data
+let cachedModelsDevProviders = new Map();
+let cachedModelsDevProvidersAt = new Map();
+const MODELS_DEV_CACHE_TTL = 3600000; // 1 hour
 const adminSessions = new Map();
 let adminModels = [];
 let adminModelIndex = new Map();
@@ -5120,33 +5124,62 @@ async function ensureOpencodeConfig(session) {
     [providerName]: providerCfg
   };
 
-  // Add Chutes provider if API key is configured
-  if (CHUTES_API_KEY) {
-    providers.chutes = {
-      options: {
-        apiKey: CHUTES_API_KEY,
-        baseURL: 'https://llm.chutes.ai/v1'
-      },
-      models: {
-        'deepseek-ai/DeepSeek-V3.2-TEE': {
-          id: 'deepseek-ai/DeepSeek-V3.2-TEE',
-          name: 'DeepSeek V3.2 TEE',
-          tool_call: true,
-          temperature: true
-        }
+  // Find which providers are used in adminModels
+  const usedProviders = new Set();
+  for (const model of adminModels) {
+    if (Array.isArray(model.providers)) {
+      for (const p of model.providers) {
+        if (p.provider) usedProviders.add(p.provider.toLowerCase());
       }
-    };
+    }
+    if (model.primaryProvider) {
+      usedProviders.add(model.primaryProvider.toLowerCase());
+    }
   }
 
-  // Add Cerebras provider if API key is configured
-  const cerebrasKey = process.env.CEREBRAS_API_KEY;
-  if (cerebrasKey) {
-    providers.cerebras = {
-      options: {
-        apiKey: cerebrasKey
-      },
-      models: {}
-    };
+  // Provider configurations with their base URLs
+  const providerConfigs = {
+    chutes: {
+      apiKey: CHUTES_API_KEY,
+      baseURL: 'https://llm.chutes.ai/v1'
+    },
+    cerebras: {
+      apiKey: process.env.CEREBRAS_API_KEY,
+      baseURL: 'https://api.cerebras.ai/v1'
+    },
+    groq: {
+      apiKey: GROQ_API_KEY,
+      baseURL: 'https://api.groq.com/openai/v1'
+    },
+    google: {
+      apiKey: GOOGLE_API_KEY,
+      baseURL: 'https://generativelanguage.googleapis.com/v1beta'
+    },
+    nvidia: {
+      apiKey: NVIDIA_API_KEY,
+      baseURL: 'https://integrate.api.nvidia.com/v1'
+    }
+  };
+
+  // Add providers that are both configured and used
+  for (const [providerId, config] of Object.entries(providerConfigs)) {
+    if (config.apiKey && usedProviders.has(providerId)) {
+      // Fetch all models from models.dev for this provider
+      const models = await fetchModelsDevProviderModels(providerId);
+
+      providers[providerId] = {
+        options: {
+          apiKey: config.apiKey,
+          baseURL: config.baseURL
+        },
+        models: models
+      };
+
+      log('Configured provider with models.dev data', {
+        provider: providerId,
+        modelCount: Object.keys(models).length
+      });
+    }
   }
 
   const config = {
@@ -5174,7 +5207,8 @@
     log('Created opencode config for session', {
       sessionId: session.id,
       appId: appSegment,
-      userId: userSegment
+      userId: userSegment,
+      providers: Object.keys(providers)
     });
   } catch (err) {
     log('Failed to create opencode config', {
@@ -7971,6 +8005,63 @@ async function fetchChutesModels() {
   }
 }
 
+// Fetch all models for a specific provider from models.dev API
+async function fetchModelsDevProviderModels(providerId) {
+  const now = Date.now();
+  const cacheKey = providerId.toLowerCase();
+
+  // Check cache
+  if (cachedModelsDevProviders.has(cacheKey)) {
+    const cachedAt = cachedModelsDevProvidersAt.get(cacheKey) || 0;
+    if (now - cachedAt < MODELS_DEV_CACHE_TTL) {
+      log('Using cached models.dev data', { provider: providerId });
+      return cachedModelsDevProviders.get(cacheKey);
+    }
+  }
+
+  try {
+    log('Fetching models from models.dev', { provider: providerId });
+    const res = await fetch('https://models.dev/api.json', { timeout: 10000 });
+    if (!res.ok) {
+      log('models.dev fetch failed', { status: res.status, provider: providerId });
+      return [];
+    }
+
+    const data = await res.json();
+    const providerData = data[providerId.toLowerCase()];
+
+    if (!providerData || !providerData.models) {
+      log('No models found for provider', { provider: providerId });
+      return [];
+    }
+
+    // Convert models.dev format to opencode format
+    const models = {};
+    for (const [modelId, modelInfo] of Object.entries(providerData.models)) {
+      models[modelId] = {
+        id: modelId,
+        name: modelInfo.name || modelId,
+        tool_call: modelInfo.tool_call ?? true,
+        temperature: modelInfo.temperature ?? true
+      };
+    }
+
+    // Cache the results
+    cachedModelsDevProviders.set(cacheKey, models);
+    cachedModelsDevProvidersAt.set(cacheKey, now);
+
+    log('Fetched models from models.dev', {
+      provider: providerId,
+      count: Object.keys(models).length
+    });
+
+    return models;
+  } catch (error) {
+    log('models.dev fetch error', { error: String(error), provider: providerId });
+    return [];
+  }
+}
+
 async function listModels(cliName = 'opencode') {
   const now = Date.now();
   const normalizedCli = normalizeCli(cliName);
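
Reviewer note: the minimal sketch below illustrates the models.dev-to-opencode conversion this patch adds. It assumes the models.dev payload is an object keyed by provider id, each carrying a `models` map keyed by model id, which is the shape fetchModelsDevProviderModels() expects; the helper name toOpencodeModels and the sample Groq entry are illustrative only and not part of the patch.

// Illustrative only: a standalone sketch of the per-model mapping performed
// inside fetchModelsDevProviderModels(). The payload shape and the example
// model entry below are assumptions, not values taken from models.dev.
const modelsDevPayload = {
  groq: {
    models: {
      'llama-3.3-70b-versatile': { name: 'Llama 3.3 70B', tool_call: true, temperature: true }
    }
  }
};

// Hypothetical helper mirroring the conversion loop in the patch.
function toOpencodeModels(providerData) {
  const models = {};
  for (const [modelId, modelInfo] of Object.entries(providerData.models || {})) {
    models[modelId] = {
      id: modelId,
      name: modelInfo.name || modelId,
      tool_call: modelInfo.tool_call ?? true,     // default to tool calling enabled
      temperature: modelInfo.temperature ?? true  // default to temperature supported
    };
  }
  return models;
}

console.log(toOpencodeModels(modelsDevPayload.groq));
// => { 'llama-3.3-70b-versatile': { id: 'llama-3.3-70b-versatile', name: 'Llama 3.3 70B', tool_call: true, temperature: true } }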