ira/tests/report/test_detail_levels.py

#!/usr/bin/env python
"""
Test Detail Levels Script
This script tests the report generation with different detail levels
for the same query to demonstrate the differences.
"""
import os
import sys
import asyncio
import argparse
from datetime import datetime

# Add the repository root to sys.path so the scripts/ and report/ packages can
# be imported (note: this assumes they live at the repo root, ira/, two
# directories above this file's folder, ira/tests/report/)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from scripts.query_to_report import query_to_report
from report.report_detail_levels import get_report_detail_level_manager, DetailLevel
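
# Assumptions, inferred from usage below: DetailLevel is an Enum whose .value
# is the level's string name, and get_detail_level_config() returns a dict
# with 'num_results', 'token_budget', 'chunk_size', 'overlap_size', and
# 'model' keys.
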
async def run_detail_level_test(query: str, use_mock: bool = False):
"""
Run a test of the query to report workflow with different detail levels.
Args:
query: The query to process
use_mock: If True, use mock data instead of making actual API calls
"""
    # Generate a timestamp for unique output file names
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    # Get the detail level manager and the full list of detail levels
    detail_level_manager = get_report_detail_level_manager()
    detail_levels = [level.value for level in DetailLevel]

    print(f"Processing query: {query}")
    print(f"Testing {len(detail_levels)} detail levels: {', '.join(detail_levels)}")
    print("This may take several minutes to complete all detail levels...")
    # Process each detail level
    for detail_level in detail_levels:
        print(f"\n{'=' * 80}")
        print(f"Processing detail level: {detail_level}")

        # Fetch and print this level's configuration
        config = detail_level_manager.get_detail_level_config(detail_level)
        print("Detail level configuration:")
        print(f"  Number of results per search engine: {config.get('num_results')}")
        print(f"  Token budget: {config.get('token_budget')}")
        print(f"  Chunk size: {config.get('chunk_size')}")
        print(f"  Overlap size: {config.get('overlap_size')}")
        print(f"  Model: {config.get('model')}")

        # Write each report to its own timestamped file
        output_file = f"report_{timestamp}_{detail_level}.md"
        # Run the workflow and time it
        start_time = datetime.now()
        print(f"Started at: {start_time.strftime('%H:%M:%S')}")
        await query_to_report(
            query=query,
            output_file=output_file,
            detail_level=detail_level,
            use_mock=use_mock
        )
        end_time = datetime.now()
        duration = end_time - start_time
        print(f"Completed at: {end_time.strftime('%H:%M:%S')}")
        print(f"Duration: {duration.total_seconds():.2f} seconds")
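        # Note: detail levels run sequentially -- each query_to_report call is
        # awaited to completion, so per-level durations do not overlap.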
        # Report the size of the generated file
        file_size = os.path.getsize(output_file)
        print(f"Report saved to: {output_file}")
        print(f"Report size: {file_size} bytes")

        # Count words in the report
        try:
            with open(output_file, 'r', encoding='utf-8') as f:
                content = f.read()
            word_count = len(content.split())
            print(f"Word count: {word_count}")
        except Exception as e:
            print(f"Error reading report: {e}")

    print(f"\n{'=' * 80}")
    print("All detail levels processed successfully!")
    print(f"Reports saved with prefix: report_{timestamp}_")


def main():
    """Main function to parse arguments and run the test."""
    parser = argparse.ArgumentParser(description='Test report generation with different detail levels')
    parser.add_argument('--query', '-q', type=str,
                        default="What is the environmental and economic impact of electric vehicles compared to traditional vehicles?",
                        help='The query to process')
    parser.add_argument('--use-mock', '-m', action='store_true',
                        help='Use mock data instead of API calls')
    parser.add_argument('--list-detail-levels', action='store_true',
                        help='List available detail levels with descriptions and exit')
    args = parser.parse_args()
    # List detail levels and exit if requested
    if args.list_detail_levels:
        detail_level_manager = get_report_detail_level_manager()
        detail_levels = detail_level_manager.get_available_detail_levels()
        print("Available detail levels:")
        for level, description in detail_levels:
            print(f"  {level}: {description}")
        return

    # Run the test
    asyncio.run(run_detail_level_test(query=args.query, use_mock=args.use_mock))


if __name__ == "__main__":
    main()