"""Debug streaming to see chunk sizes."""
import time, torch
from faster_qwen3_tts import FasterQwen3TTS

# Reference voice clip and its matching transcript — both are fed to the
# voice-clone API as the cloning prompt (Hindi sample).
ref_audio = "/home/ubuntu/vibevoice/demo/voices/modi.wav"
ref_text = "मेरे प्यारे देशवासियों, मुझे सीतापुर के ओजस्वी ने लिखा है कि अमृत महोत्सव से जुड़ी चर्चाएं उन्हें खूब पसंद आ रही हैं।"
# Text synthesized during the timed streaming run below.
test_text = "मेरे प्यारे देशवासियों, आज मैं आपके साथ कुछ बहुत ज़रूरी बातें करना चाहता हूँ. हमारा देश एक नये दौर में प्रवेश कर रहा है."

model = FasterQwen3TTS.from_pretrained("Qwen/Qwen3-TTS-12Hz-1.7B-Base")

# Warm up: stream one short utterance and discard the output, so that
# first-call overhead (compilation, caches) does not skew the timed run below.
warmup_stream = model.generate_voice_clone_streaming(
    text="Test.",
    language="Auto",
    ref_audio=ref_audio,
    ref_text=ref_text,
    chunk_size=8,
)
for _audio, _sr, _timing in warmup_stream:
    pass

# Timed run: stream the test text and log per-chunk size, duration, and
# wall-clock arrival time relative to the start of generation.
print("Streaming chunks:")
all_chunks = []
t0 = time.perf_counter()
stream = model.generate_voice_clone_streaming(
    text=test_text,
    language="Auto",
    ref_audio=ref_audio,
    ref_text=ref_text,
    chunk_size=8,
)
for i, (chunk, sr, timing) in enumerate(stream):
    elapsed = time.perf_counter() - t0
    if isinstance(chunk, torch.Tensor):
        shape, samples = chunk.shape, chunk.numel()
    else:
        # Non-tensor chunk (presumably a numpy array — confirm against the API).
        shape = "numpy"
        samples = len(chunk) if hasattr(chunk, '__len__') else 0
    dur = samples / sr if sr > 0 else 0
    print(f"  Chunk {i:2d}: shape={shape}, samples={samples}, dur={dur:.3f}s, elapsed={elapsed:.3f}s, timing={timing}")
    all_chunks.append(chunk)

total = time.perf_counter() - t0
print(f"\nTotal chunks: {len(all_chunks)}, Total time: {total:.2f}s")

# Try concatenating differently
if all_chunks and isinstance(all_chunks[0], torch.Tensor):
    # Dump each chunk's shape/dtype first, to spot any dimension mismatch
    # before torch.cat raises.
    for idx, piece in enumerate(all_chunks):
        print(f"  Chunk {idx}: shape={piece.shape}, dtype={piece.dtype}")
    joined = torch.cat(all_chunks, dim=-1)
    print(f"Concatenated shape: {joined.shape}, total samples: {joined.numel()}, duration: {joined.numel()/sr:.2f}s")
