Add automatic model discovery from configured external providers
This commit is contained in:
2478
chat/package-lock.json
generated
2478
chat/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
136
chat/server.js
136
chat/server.js
@@ -7492,6 +7492,125 @@ function runCommand(command, args, options = {}) {
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
// Fetch models from OpenRouter API when API key is configured
/**
 * Fetch the list of available models from OpenRouter.
 * @returns {Promise<Array<{name: string, label: string, provider: string}>>}
 *   Normalized model entries; empty array when no key is configured or on any failure.
 */
async function fetchOpenRouterModels() {
  if (!OPENROUTER_API_KEY) return [];
  try {
    const res = await fetch('https://openrouter.ai/api/v1/models', {
      headers: { 'Authorization': `Bearer ${OPENROUTER_API_KEY}` },
      // Bound the request so a stalled provider cannot hang model discovery.
      signal: AbortSignal.timeout(10000)
    });
    if (!res.ok) {
      log('OpenRouter models fetch failed', { status: res.status });
      return [];
    }
    const data = await res.json();
    const models = Array.isArray(data?.data) ? data.data : [];
    return models.map((m) => ({
      name: m.id || m.name,
      label: `${m.id || m.name} (OpenRouter)`,
      provider: 'openrouter'
    })).filter((m) => m.name);
  } catch (error) {
    // Timeouts surface here as AbortError and are treated like any other failure.
    log('OpenRouter models fetch error', { error: String(error) });
    return [];
  }
}
|
||||
|
||||
// Fetch models from Mistral API when API key is configured
/**
 * Fetch the list of available models from Mistral.
 * @returns {Promise<Array<{name: string, label: string, provider: string}>>}
 *   Normalized model entries; empty array when no key is configured or on any failure.
 */
async function fetchMistralModels() {
  if (!MISTRAL_API_KEY) return [];
  try {
    const res = await fetch('https://api.mistral.ai/v1/models', {
      headers: { 'Authorization': `Bearer ${MISTRAL_API_KEY}` },
      // Bound the request so a stalled provider cannot hang model discovery.
      signal: AbortSignal.timeout(10000)
    });
    if (!res.ok) {
      log('Mistral models fetch failed', { status: res.status });
      return [];
    }
    const data = await res.json();
    const models = Array.isArray(data?.data) ? data.data : [];
    return models.map((m) => ({
      name: m.id || m.name,
      label: `${m.id || m.name} (Mistral)`,
      provider: 'mistral'
    })).filter((m) => m.name);
  } catch (error) {
    // Timeouts surface here as AbortError and are treated like any other failure.
    log('Mistral models fetch error', { error: String(error) });
    return [];
  }
}
|
||||
|
||||
// Fetch models from Groq API when API key is configured
/**
 * Fetch the list of available models from Groq (OpenAI-compatible endpoint).
 * @returns {Promise<Array<{name: string, label: string, provider: string}>>}
 *   Normalized model entries; empty array when no key is configured or on any failure.
 */
async function fetchGroqModels() {
  if (!GROQ_API_KEY) return [];
  try {
    const res = await fetch('https://api.groq.com/openai/v1/models', {
      headers: { 'Authorization': `Bearer ${GROQ_API_KEY}` },
      // Bound the request so a stalled provider cannot hang model discovery.
      signal: AbortSignal.timeout(10000)
    });
    if (!res.ok) {
      log('Groq models fetch failed', { status: res.status });
      return [];
    }
    const data = await res.json();
    const models = Array.isArray(data?.data) ? data.data : [];
    return models.map((m) => ({
      name: m.id || m.name,
      label: `${m.id || m.name} (Groq)`,
      provider: 'groq'
    })).filter((m) => m.name);
  } catch (error) {
    // Timeouts surface here as AbortError and are treated like any other failure.
    log('Groq models fetch error', { error: String(error) });
    return [];
  }
}
|
||||
|
||||
// Fetch models from Google Gemini API when API key is configured
/**
 * Fetch the list of available models from the Google Gemini API.
 * @returns {Promise<Array<{name: string, label: string, provider: string}>>}
 *   Normalized model entries; empty array when no key is configured or on any failure.
 */
async function fetchGoogleModels() {
  if (!GOOGLE_API_KEY) return [];
  try {
    // Send the key in the x-goog-api-key header rather than the query string,
    // so it cannot leak into request logs, proxies, or error messages.
    const res = await fetch('https://generativelanguage.googleapis.com/v1beta/models', {
      headers: { 'x-goog-api-key': GOOGLE_API_KEY },
      // Bound the request so a stalled provider cannot hang model discovery.
      signal: AbortSignal.timeout(10000)
    });
    if (!res.ok) {
      log('Google models fetch failed', { status: res.status });
      return [];
    }
    const data = await res.json();
    const models = Array.isArray(data?.models) ? data.models : [];
    return models.map((m) => ({
      name: m.name || m.id,
      label: `${m.name || m.id} (Google)`,
      provider: 'google'
    })).filter((m) => m.name);
  } catch (error) {
    // Timeouts surface here as AbortError and are treated like any other failure.
    log('Google models fetch error', { error: String(error) });
    return [];
  }
}
|
||||
|
||||
// Fetch models from NVIDIA API when API key is configured
/**
 * Fetch the list of available models from the NVIDIA NIM API.
 * @returns {Promise<Array<{name: string, label: string, provider: string}>>}
 *   Normalized model entries; empty array when no key is configured or on any failure.
 */
async function fetchNvidiaModels() {
  if (!NVIDIA_API_KEY) return [];
  try {
    const res = await fetch('https://integrate.api.nvidia.com/v1/models', {
      headers: { 'Authorization': `Bearer ${NVIDIA_API_KEY}` },
      // Bound the request so a stalled provider cannot hang model discovery.
      signal: AbortSignal.timeout(10000)
    });
    if (!res.ok) {
      log('NVIDIA models fetch failed', { status: res.status });
      return [];
    }
    const data = await res.json();
    const models = Array.isArray(data?.data) ? data.data : [];
    return models.map((m) => ({
      name: m.id || m.name,
      label: `${m.id || m.name} (NVIDIA)`,
      provider: 'nvidia'
    })).filter((m) => m.name);
  } catch (error) {
    // Timeouts surface here as AbortError and are treated like any other failure.
    log('NVIDIA models fetch error', { error: String(error) });
    return [];
  }
}
|
||||
|
||||
async function listModels(cliName = 'opencode') {
|
||||
const now = Date.now();
|
||||
const normalizedCli = normalizeCli(cliName);
|
||||
@@ -7572,12 +7691,27 @@ async function listModels(cliName = 'opencode') {
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch models from configured external providers
|
||||
const externalProviderFetches = [
|
||||
fetchOpenRouterModels(),
|
||||
fetchMistralModels(),
|
||||
fetchGroqModels(),
|
||||
fetchGoogleModels(),
|
||||
fetchNvidiaModels()
|
||||
];
|
||||
|
||||
const externalResults = await Promise.allSettled(externalProviderFetches);
|
||||
for (const result of externalResults) {
|
||||
if (result.status === 'fulfilled' && Array.isArray(result.value)) {
|
||||
result.value.forEach((m) => addModel(m));
|
||||
}
|
||||
}
|
||||
|
||||
// Add fallback models per CLI
|
||||
const fallbackModels = {
|
||||
opencode: [],
|
||||
};
|
||||
|
||||
|
||||
// Add models from OPENCODE_EXTRA_MODELS env var
|
||||
if (process.env.OPENCODE_EXTRA_MODELS) {
|
||||
const extras = process.env.OPENCODE_EXTRA_MODELS.split(',').map((s) => s.trim()).filter(Boolean);
|
||||
|
||||
Reference in New Issue
Block a user