{"timestamp":1770797750852,"paths":["./stress_results_opt_local.json","./.cursorignore","./.env.example","./.env","./.gitignore","./requirements.txt","./README.md","./.cursorrules","./veena3modal/local_server.py","./veena3modal/__init__.py","./veena3modal/app.py","./veena3modal/__main__.py","./veena3modal/audio/__init__.py","./veena3modal/audio/crossfade.py","./veena3modal/audio/utils.py","./veena3modal/audio/encoder.py","./veena3modal/shared/__init__.py","./veena3modal/shared/logging.py","./veena3modal/shared/metrics.py","./veena3modal/api/schemas.py","./veena3modal/api/auth.py","./veena3modal/api/fastapi_app.py","./veena3modal/api/headers.py","./veena3modal/api/websocket_handler.py","./veena3modal/api/rate_limiter.py","./veena3modal/api/__init__.py","./veena3modal/api/error_handlers.py","./veena3modal/services/__init__.py","./veena3modal/services/sentence_store.py","./veena3modal/services/tts_runtime.py","./veena3modal/services/credits.py","./veena3modal/processing/prompt_builder.py","./veena3modal/processing/emotion_normalizer.py","./veena3modal/processing/long_text_processor.py","./veena3modal/processing/__init__.py","./veena3modal/processing/text_normalizer.py","./veena3modal/processing/text_chunker.py","./scripts/final_asr_verification.py","./scripts/measure_client_ttfb.py","./scripts/validate_text_normalization.py","./scripts/stress_test_streaming_runtime.py","./scripts/analyze_missing_words.py","./scripts/test_true_streaming.py","./scripts/asr_validation_openai.py","./scripts/stress_test_runtime_detailed.py","./scripts/quick_asr_check.py","./scripts/stress_test_production.py","./scripts/measure_ttfb_detailed_v2.py","./scripts/test_correct_format.py","./scripts/test_original_bug.py","./scripts/validate_indic_normalization.py","./scripts/wer_test_with_emotions.py","./scripts/test_fix.py","./scripts/elevenlabs_verify_fix.py","./scripts/setup_local.sh","./scripts/asr_with_openai_final.py","./scripts/pattern_analysis.py","./scripts/validate_modal_endpoint.py","./scripts/detailed_word_analysis.py","./scripts/test_chunking_no_emotions.py","./scripts/create_visual_map.py","./scripts/measure_ttfb_detailed.py","./scripts/test_all_features.py","./scripts/stress_test_local.py","./scripts/profile_pipeline.py","./scripts/validate_chunking_asr.py","./scripts/comprehensive_validation.py","./scripts/elevenlabs_asr_analysis.py","./scripts/validate_true_streaming.py","./scripts/stress_test_optimized_local.py","./scripts/final_chunking_test.sh","./external/sparktts/requirements.txt","./modal_docs/# Asynchronous API usage.md","./modal_docs/# Reserving CPU and memory.md","./modal_docs/# Memory Snapshot.md","./modal_docs/# Scaling out.md","./modal_docs/# GPU Health.md","./modal_docs/# GPU acceleration.md","./modal_docs/# Managing deployments.md","./modal_docs/# Using existing images.md","./modal_docs/# Invoking deployed functions.md","./modal_docs/# Batch Processing.md","./modal_docs/# Preemption.md","./modal_docs/# Real-time object detection with WebRTC.md","./modal_docs/# Input concurrency.md","./modal_docs/# Using CUDA on Modal.md","./modal_docs/# Run a FastRTC app on Modal.md","./modal_docs/# Cold start performance.md","./modal_docs/# Images.md","./modal_docs/# Storing model weights on Modal.md","./modal_docs/# Apps, Functions, and entrypoints.md","./modal_docs/# Continuous deployment.md","./modal_docs/# Environment variables.md","./modal_docs/# Hello world wide web!.md","./modal_docs/# Timeouts.md","./modal_docs/# Job processing.md","./modal_docs/# Failures and retries.md","./modal_docs/# QuiLLMan: Voice Chat with Moshi.md","./modal_docs/# Dynamic batching (beta).md","./modal_docs/# Volumes.md"]}