#!/usr/bin/env python
"""
Test Query to Report Script

This script tests the query_to_report.py script with a sample query.
"""

import os
import sys
import asyncio
import argparse
from datetime import datetime

# Add parent directory to path to import modules
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from scripts.query_to_report import query_to_report


async def run_test(use_mock: bool = False):
    """
    Run a test of the query to report workflow.

    Args:
        use_mock: If True, use mock data instead of making actual API calls
    """
    # Sample query
    query = "What are the latest advancements in quantum computing?"

    # Generate timestamp for unique output file
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_file = f"report_{timestamp}.md"

    # Run the workflow
    await query_to_report(
        query=query,
        output_file=output_file,
        num_results=5,  # Limit to 5 results per engine for faster testing
        use_mock=use_mock
    )

    print("\nTest completed successfully!")
    print(f"Report saved to: {output_file}")

    # Print the first few lines of the report
    try:
        with open(output_file, 'r', encoding='utf-8') as f:
            preview = f.read(500)
        print("\nReport Preview:")
        print("-" * 80)
        print(preview + "...")
        print("-" * 80)
    except Exception as e:
        print(f"Error reading report: {e}")


def main():
    """Main function to parse arguments and run the test."""
    parser = argparse.ArgumentParser(description='Test the query to report workflow')
    parser.add_argument('--use-mock', '-m', action='store_true', help='Use mock data instead of API calls')

    args = parser.parse_args()

    # Run the test
    asyncio.run(run_test(use_mock=args.use_mock))


if __name__ == "__main__":
    main()