Fix Gemini model integration with correct model name and endpoint

This commit is contained in:
Steve White 2025-02-28 15:04:07 -06:00
parent a09eb1519e
commit f94bde875b
3 changed files with 10 additions and 4 deletions

View File

@@ -84,11 +84,11 @@ models:
gemini-2.0-flash-lite:
provider: "google"
-model_name: "gemini-2.0-flash-lite-001"
+model_name: "gemini-2.0-flash-lite"
temperature: 0.5
max_tokens: 2048
top_p: 1.0
-endpoint: "https://generativelanguage.googleapis.com/v1"
+endpoint: "https://generativelanguage.googleapis.com/v1beta"
# Default model to use if not specified for a module
default_model: "llama-3.1-8b-instant" # Using Groq's Llama 3.1 8B model for testing

View File

@@ -106,7 +106,10 @@ class LLMInterface:
# Special handling for Google Gemini models
params['model'] = f"gemini/{self.model_config.get('model_name', self.model_name)}"
# Google Gemini uses a different API base
-params['api_base'] = self.model_config.get('endpoint', 'https://generativelanguage.googleapis.com/v1')
+params['api_base'] = self.model_config.get('endpoint', 'https://generativelanguage.googleapis.com/v1beta')
# Add additional parameters for Gemini
params['custom_llm_provider'] = 'gemini'
else:
# Standard provider (OpenAI, Anthropic, etc.)
params['model'] = self.model_name

View File

@@ -102,7 +102,10 @@ class ReportSynthesizer:
# Special handling for Google Gemini models
params['model'] = f"gemini/{self.model_config.get('model_name', self.model_name)}"
# Google Gemini uses a different API base
-params['api_base'] = self.model_config.get('endpoint', 'https://generativelanguage.googleapis.com/v1')
+params['api_base'] = self.model_config.get('endpoint', 'https://generativelanguage.googleapis.com/v1beta')
# Add additional parameters for Gemini
params['custom_llm_provider'] = 'gemini'
else:
# Standard provider (OpenAI, Anthropic, etc.)
params['model'] = self.model_name