Fix Google Gemini model integration with LiteLLM
parent c687216393
commit 6ff455358c
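LiteLLM receives Gemini models in provider-prefixed form ("google/<model_name>" in this codebase), so the prefix now lives in the code that builds the completion params rather than in the YAML config. The same google branch is added to ReportSynthesizer, along with request and error logging around the completion call.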
@@ -84,7 +84,7 @@ models:
   gemini-2.0-flash-lite:
     provider: "google"
-    model_name: "google/gemini-2.0-flash-lite-001"
+    model_name: "gemini-2.0-flash-lite-001"
     temperature: 0.5
     max_tokens: 2048
     top_p: 1.0
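Net effect of this hunk: the config stores the bare Gemini model id and the provider prefix is applied in code. A minimal sketch of that resolution (the dict literal stands in for the parsed YAML; the variable names are illustrative, not from the repo):

model_config = {"provider": "google", "model_name": "gemini-2.0-flash-lite-001"}
litellm_model = f"{model_config['provider']}/{model_config['model_name']}"
assert litellm_model == "google/gemini-2.0-flash-lite-001"  # what the code builds below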
@@ -101,7 +101,7 @@ class LLMInterface:
             }
         elif provider == 'google':
             # Special handling for Google Gemini models
-            params['model'] = self.model_config.get('model_name', self.model_name)
+            params['model'] = f"google/{self.model_config.get('model_name', self.model_name)}"
             # Google Gemini uses a different API base
             params['api_base'] = self.model_config.get('endpoint', 'https://generativelanguage.googleapis.com/v1')
         else:
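For context, a hedged sketch of the LiteLLM call these params ultimately feed. The google/ prefix and api_base mirror the diff; the message content is a placeholder, and whether a given LiteLLM version routes the google/ prefix to the Generative Language API should be checked against its docs:

from litellm import completion

params = {
    "model": "google/gemini-2.0-flash-lite-001",  # prefix now added in code
    "api_base": "https://generativelanguage.googleapis.com/v1",  # default from the diff
    "messages": [{"role": "user", "content": "Say hello."}],  # placeholder message
    "temperature": 0.5,
    "max_tokens": 2048,
}
response = completion(**params)  # same call the synthesizer makes
print(response.choices[0].message.content)  # OpenAI-style response shape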
@@ -95,6 +95,11 @@ class ReportSynthesizer:
                 'HTTP-Referer': 'https://sim-search.app',  # Replace with your actual app URL
                 'X-Title': 'Intelligent Research System'  # Replace with your actual app name
             }
+        elif provider == 'google':
+            # Special handling for Google Gemini models
+            params['model'] = f"google/{self.model_config.get('model_name', self.model_name)}"
+            # Google Gemini uses a different API base
+            params['api_base'] = self.model_config.get('endpoint', 'https://generativelanguage.googleapis.com/v1')
         else:
             # Standard provider (OpenAI, Anthropic, etc.)
             params['model'] = self.model_name
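This duplicates the google branch already present in LLMInterface. A hypothetical helper (not part of this commit) that both classes could share to keep the two branches from drifting:

GOOGLE_DEFAULT_ENDPOINT = "https://generativelanguage.googleapis.com/v1"

def build_google_params(model_config: dict, fallback_model: str) -> dict:
    """Build the LiteLLM model/api_base pair for a Google Gemini config."""
    return {
        "model": f"google/{model_config.get('model_name', fallback_model)}",
        "api_base": model_config.get("endpoint", GOOGLE_DEFAULT_ENDPOINT),
    }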
@@ -118,6 +123,9 @@ class ReportSynthesizer:
         params['messages'] = messages
         params['stream'] = stream
 
+        logger.info(f"Generating completion with model: {params.get('model')}")
+        logger.info(f"Provider: {self.model_config.get('provider')}")
+
         response = completion(**params)
 
         if stream:
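With the example config above, these new lines should log, give or take the logger's formatting:

Generating completion with model: google/gemini-2.0-flash-lite-001
Provider: google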
@@ -132,6 +140,13 @@ class ReportSynthesizer:
             return content
         except Exception as e:
             logger.error(f"Error generating completion: {e}")
+            logger.error(f"Model params: {params}")
+
+            # More detailed error for debugging
+            if hasattr(e, '__dict__'):
+                for key, value in e.__dict__.items():
+                    logger.error(f"Error detail - {key}: {value}")
+
             return f"Error: {str(e)}"
 
     def _process_thinking_tags(self, content: str) -> str:
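The new introspection loop dumps whatever attributes the raised exception carries. A self-contained illustration with a made-up exception class (LiteLLM's real provider errors carry fields such as status_code, but treat the exact attribute names as version-dependent):

import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)

class FakeAPIError(Exception):  # stand-in for a provider error, not a real LiteLLM class
    def __init__(self, message: str, status_code: int):
        super().__init__(message)
        self.status_code = status_code
        self.llm_provider = "google"

try:
    raise FakeAPIError("invalid model name", status_code=400)
except Exception as e:
    logger.error(f"Error generating completion: {e}")
    if hasattr(e, '__dict__'):
        for key, value in e.__dict__.items():
            logger.error(f"Error detail - {key}: {value}")
    # logs "Error detail - status_code: 400" and "Error detail - llm_provider: google"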