#!/usr/bin/env python
"""
Test Query to Report Script with Electric Vehicles Query

This script tests query_to_report.py with a query about the environmental
and economic impact of electric vehicles.
"""

import os
import sys
import asyncio
import argparse
from datetime import datetime

# Add the parent directory to the path so that the scripts and report
# packages can be imported when this file is run directly.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from scripts.query_to_report import query_to_report
from report.report_detail_levels import get_report_detail_level_manager


async def run_ev_test(detail_level: str = "standard", use_mock: bool = False):
    """
    Run a test of the query-to-report workflow with an electric vehicles query.

    Args:
        detail_level: Level of detail for the report (brief, standard, detailed, comprehensive)
        use_mock: If True, use mock data instead of making actual API calls
    """
    # Query about electric vehicles
    query = "What is the environmental and economic impact of electric vehicles compared to traditional vehicles?"

    # Generate timestamp for unique output file
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_file = f"ev_report_{timestamp}_{detail_level}.md"
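    # e.g. ev_report_20250101_120000_standard.md (the timestamp varies per run)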
print(f"Processing query: {query}")
|
|
print(f"Detail level: {detail_level}")
|
|
print(f"This may take a few minutes depending on the number of search results and API response times...")
|
|
|
|

    # Get detail level configuration
    detail_level_manager = get_report_detail_level_manager()
    config = detail_level_manager.get_detail_level_config(detail_level)

    # Print detail level configuration
    print("\nDetail level configuration:")
    print(f"  Number of results per search engine: {config.get('num_results')}")
    print(f"  Token budget: {config.get('token_budget')}")
    print(f"  Chunk size: {config.get('chunk_size')}")
    print(f"  Overlap size: {config.get('overlap_size')}")
    print(f"  Model: {config.get('model')}")

    # Run the workflow
    await query_to_report(
        query=query,
        output_file=output_file,
        detail_level=detail_level,
        use_mock=use_mock
    )
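    # query_to_report is expected to write the finished report to output_file,
    # which the preview step below relies on.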

    print("\nTest completed successfully!")
    print(f"Report saved to: {output_file}")

    # Print a short preview of the report
    try:
        with open(output_file, 'r', encoding='utf-8') as f:
            preview = f.read(1000)  # Read the first 1000 characters
        print("\nReport Preview:")
        print("-" * 80)
        print(preview + "...")
        print("-" * 80)
    except Exception as e:
        print(f"Error reading report: {e}")


def main():
    """Main function to parse arguments and run the test."""
    parser = argparse.ArgumentParser(description='Test the query to report workflow with EV query')
    parser.add_argument('--detail-level', '-d', type=str, default='standard',
                        choices=['brief', 'standard', 'detailed', 'comprehensive'],
                        help='Level of detail for the report')
    parser.add_argument('--use-mock', '-m', action='store_true',
                        help='Use mock data instead of API calls')
    parser.add_argument('--list-detail-levels', action='store_true',
                        help='List available detail levels with descriptions and exit')

    args = parser.parse_args()

    # List detail levels if requested
    if args.list_detail_levels:
        detail_level_manager = get_report_detail_level_manager()
        detail_levels = detail_level_manager.get_available_detail_levels()
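        # get_available_detail_levels() is assumed to return (level, description)
        # pairs, which is how the loop below unpacks them.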
        print("Available detail levels:")
        for level, description in detail_levels:
            print(f"  {level}: {description}")
        return

    # Run the test
    asyncio.run(run_ev_test(detail_level=args.detail_level, use_mock=args.use_mock))


if __name__ == "__main__":
    main()