#!/bin/bash
#
# Relaunch the vLLM server: kill stale GPU compute processes, start the
# server in the background, wait for it to come up, then sanity-check the
# HTTP endpoint. All output (stdout + stderr) is captured in LAUNCH_LOG.
#
# NOTE: deliberately NOT using `set -e` — several steps (pkill, curl) are
# expected to fail harmlessly when there is nothing to kill or the server
# is not yet up.
set -uo pipefail

readonly LAUNCH_LOG=/home/ubuntu/launch_output.txt
readonly SERVER_LOG=/home/ubuntu/vllm_server_log.txt
readonly PYTHON=/home/ubuntu/vllm_env/bin/python3
readonly SERVER_SCRIPT=/home/ubuntu/launch_server.py
readonly ENDPOINT=http://localhost:8091/v1/audio/voices
readonly STARTUP_TIMEOUT=180   # max seconds to wait for model load
readonly POLL_INTERVAL=5       # seconds between readiness probes

exec > "$LAUNCH_LOG" 2>&1

echo "=== Step 1: Kill old processes ==="
# Ask politely first (SIGTERM) so processes can release the GPU cleanly...
nvidia-smi --query-compute-apps=pid --format=csv,noheader 2>/dev/null \
  | xargs -r kill 2>/dev/null
pkill -f vllm 2>/dev/null
sleep 3
# ...then force-kill anything still holding GPU memory.
nvidia-smi --query-compute-apps=pid --format=csv,noheader 2>/dev/null \
  | xargs -r kill -9 2>/dev/null
pkill -9 -f vllm 2>/dev/null
sleep 2

echo "=== Step 2: GPU status ==="
nvidia-smi

echo "=== Step 3: Launch server ==="
rm -f "$SERVER_LOG"
VLLM_ATTENTION_BACKEND=TORCH_SDPA nohup "$PYTHON" "$SERVER_SCRIPT" > "$SERVER_LOG" 2>&1 &
SERVER_PID=$!
echo "Server PID: $SERVER_PID"

echo "=== Step 4: Waiting up to ${STARTUP_TIMEOUT}s for model load ==="
# Poll the endpoint instead of sleeping a fixed 180s: proceed as soon as
# the server answers, and bail out early if the process dies during load.
elapsed=0
while (( elapsed < STARTUP_TIMEOUT )); do
  if ! kill -0 "$SERVER_PID" 2>/dev/null; then
    echo "Server process $SERVER_PID exited during startup"
    break
  fi
  # -f makes curl return non-zero on HTTP 4xx/5xx, not just connect errors.
  if curl -sf -o /dev/null "$ENDPOINT"; then
    echo "Server responded after ${elapsed}s"
    break
  fi
  sleep "$POLL_INTERVAL"
  elapsed=$((elapsed + POLL_INTERVAL))
done

echo "=== Step 5: Server log ==="
cat "$SERVER_LOG"

echo "=== Step 6: GPU status ==="
nvidia-smi

echo "=== Step 7: Test endpoint ==="
curl -sf "$ENDPOINT" || echo "NOT RESPONDING"

echo "=== DONE ==="