feat: dynamic provider model loading from models.dev

- Add fetchModelsDevProviderModels() function with 1-hour caching
- Update ensureOpencodeConfig() to dynamically load all models for configured providers
- Support Chutes, Cerebras, Groq, Google, and Nvidia providers
- Only fetch models for providers actually used in adminModels
- Cache models.dev data to reduce API calls and improve performance

This allows any model listed on models.dev to work once it is added in the admin panel,
instead of requiring hardcoded model definitions.
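
Illustrative sketch of the mapping this enables, assuming the models.dev api.json payload keys providers by id and exposes a models map with name, tool_call, and temperature fields (the only fields the new loader reads); the model id below is hypothetical:

// Illustrative only: assumed shape of the models.dev payload, keyed by provider id.
// 'example/model-id' is a placeholder, not a real model.
const modelsDevPayload = {
  groq: {
    models: {
      'example/model-id': { name: 'Example Model', tool_call: true, temperature: true }
    }
  }
};

// fetchModelsDevProviderModels('groq') converts that into the models block that
// ensureOpencodeConfig() writes under the provider's opencode config entry:
const groqProviderEntry = {
  options: {
    apiKey: GROQ_API_KEY, // only emitted when the key is configured and the provider is used
    baseURL: 'https://api.groq.com/openai/v1'
  },
  models: {
    'example/model-id': {
      id: 'example/model-id',
      name: 'Example Model',
      tool_call: true,
      temperature: true
    }
  }
};

Because results are cached per provider for MODELS_DEV_CACHE_TTL (one hour), repeated config writes inside that window reuse the cached map instead of re-fetching api.json.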
This commit is contained in: southseact-3d
2026-02-11 10:53:37 +00:00
parent d0105ff967
commit 17792d39a0


@@ -1527,6 +1527,10 @@ function stopAutoSave() {
 }
 let cachedModels = new Map();
 let cachedModelsAt = new Map();
+// Cache for models.dev provider data
+let cachedModelsDevProviders = new Map();
+let cachedModelsDevProvidersAt = new Map();
+const MODELS_DEV_CACHE_TTL = 3600000; // 1 hour
 const adminSessions = new Map();
 let adminModels = [];
 let adminModelIndex = new Map();
@@ -5120,33 +5124,62 @@ async function ensureOpencodeConfig(session) {
     [providerName]: providerCfg
   };
-  // Add Chutes provider if API key is configured
-  if (CHUTES_API_KEY) {
-    providers.chutes = {
-      options: {
-        apiKey: CHUTES_API_KEY,
-        baseURL: 'https://llm.chutes.ai/v1'
-      },
-      models: {
-        'deepseek-ai/DeepSeek-V3.2-TEE': {
-          id: 'deepseek-ai/DeepSeek-V3.2-TEE',
-          name: 'DeepSeek V3.2 TEE',
-          tool_call: true,
-          temperature: true
-        }
-      }
-    };
-  }
-  // Add Cerebras provider if API key is configured
-  const cerebrasKey = process.env.CEREBRAS_API_KEY;
-  if (cerebrasKey) {
-    providers.cerebras = {
-      options: {
-        apiKey: cerebrasKey
-      },
-      models: {}
-    };
-  }
+  // Find which providers are used in adminModels
+  const usedProviders = new Set();
+  for (const model of adminModels) {
+    if (Array.isArray(model.providers)) {
+      for (const p of model.providers) {
+        if (p.provider) usedProviders.add(p.provider.toLowerCase());
+      }
+    }
+    if (model.primaryProvider) {
+      usedProviders.add(model.primaryProvider.toLowerCase());
+    }
+  }
+  // Provider configurations with their base URLs
+  const providerConfigs = {
+    chutes: {
+      apiKey: CHUTES_API_KEY,
+      baseURL: 'https://llm.chutes.ai/v1'
+    },
+    cerebras: {
+      apiKey: process.env.CEREBRAS_API_KEY,
+      baseURL: 'https://api.cerebras.ai/v1'
+    },
+    groq: {
+      apiKey: GROQ_API_KEY,
+      baseURL: 'https://api.groq.com/openai/v1'
+    },
+    google: {
+      apiKey: GOOGLE_API_KEY,
+      baseURL: 'https://generativelanguage.googleapis.com/v1beta'
+    },
+    nvidia: {
+      apiKey: NVIDIA_API_KEY,
+      baseURL: 'https://integrate.api.nvidia.com/v1'
+    }
+  };
+  // Add providers that are both configured and used
+  for (const [providerId, config] of Object.entries(providerConfigs)) {
+    if (config.apiKey && usedProviders.has(providerId)) {
+      // Fetch all models from models.dev for this provider
+      const models = await fetchModelsDevProviderModels(providerId);
+      providers[providerId] = {
+        options: {
+          apiKey: config.apiKey,
+          baseURL: config.baseURL
+        },
+        models: models
+      };
+      log('Configured provider with models.dev data', {
+        provider: providerId,
+        modelCount: Object.keys(models).length
+      });
+    }
+  }
   const config = {
@@ -5174,7 +5207,8 @@ async function ensureOpencodeConfig(session) {
     log('Created opencode config for session', {
       sessionId: session.id,
       appId: appSegment,
-      userId: userSegment
+      userId: userSegment,
+      providers: Object.keys(providers)
     });
   } catch (err) {
     log('Failed to create opencode config', {
@@ -7971,6 +8005,63 @@ async function fetchChutesModels() {
   }
 }
+// Fetch all models for a specific provider from models.dev API
+async function fetchModelsDevProviderModels(providerId) {
+  const now = Date.now();
+  const cacheKey = providerId.toLowerCase();
+
+  // Check cache
+  if (cachedModelsDevProviders.has(cacheKey)) {
+    const cachedAt = cachedModelsDevProvidersAt.get(cacheKey) || 0;
+    if (now - cachedAt < MODELS_DEV_CACHE_TTL) {
+      log('Using cached models.dev data', { provider: providerId });
+      return cachedModelsDevProviders.get(cacheKey);
+    }
+  }
+
+  try {
+    log('Fetching models from models.dev', { provider: providerId });
+    const res = await fetch('https://models.dev/api.json', { timeout: 10000 });
+    if (!res.ok) {
+      log('models.dev fetch failed', { status: res.status, provider: providerId });
+      return [];
+    }
+    const data = await res.json();
+    const providerData = data[providerId.toLowerCase()];
+    if (!providerData || !providerData.models) {
+      log('No models found for provider', { provider: providerId });
+      return [];
+    }
+
+    // Convert models.dev format to opencode format
+    const models = {};
+    for (const [modelId, modelInfo] of Object.entries(providerData.models)) {
+      models[modelId] = {
+        id: modelId,
+        name: modelInfo.name || modelId,
+        tool_call: modelInfo.tool_call ?? true,
+        temperature: modelInfo.temperature ?? true
+      };
+    }
+
+    // Cache the results
+    cachedModelsDevProviders.set(cacheKey, models);
+    cachedModelsDevProvidersAt.set(cacheKey, now);
+    log('Fetched models from models.dev', {
+      provider: providerId,
+      count: Object.keys(models).length
+    });
+    return models;
+  } catch (error) {
+    log('models.dev fetch error', { error: String(error), provider: providerId });
+    return [];
+  }
+}
+
 async function listModels(cliName = 'opencode') {
   const now = Date.now();
   const normalizedCli = normalizeCli(cliName);