#!/usr/bin/env bash
#
# Run the daVinci-MagiHuman 540p inference test inside Docker.
#
# Overridable via environment (defaults preserve the original behavior):
#   CHECKPOINTS_DIR  host directory with model checkpoints (default: /home/ubuntu/checkpoints)
#   REPO_DIR         host checkout of daVinci-MagiHuman    (default: /home/ubuntu/daVinci-MagiHuman)
#   MASTER_PORT      torchrun c10d rendezvous port          (default: 6011)
set -euo pipefail

CHECKPOINTS_DIR="${CHECKPOINTS_DIR:-/home/ubuntu/checkpoints}"
REPO_DIR="${REPO_DIR:-/home/ubuntu/daVinci-MagiHuman}"
MASTER_PORT="${MASTER_PORT:-6011}"
readonly IMAGE="sandai/magi-human:latest"

# MASTER_PORT is passed with `-e` so the single literal on the host is the
# only place the port is defined; the inner script reads it at runtime.
docker run --rm --gpus all --ipc=host \
  --ulimit memlock=-1 --ulimit stack=67108864 \
  -v "${CHECKPOINTS_DIR}:/models" \
  -v "${REPO_DIR}:/workspace/daVinci-MagiHuman" \
  -e MASTER_PORT="${MASTER_PORT}" \
  -w /workspace \
  "${IMAGE}" \
  bash -c '
    # The host-level `set -euo pipefail` does NOT propagate into this
    # container shell; without this, a failed clone/install would be
    # silently ignored and torchrun would run in a broken environment.
    set -euo pipefail

    # Install MagiCompiler on first run. NOTE(review): the check is by
    # directory presence, so a partially-failed install must be removed
    # manually before retrying. The subshell keeps the cwd unchanged even
    # if an install step fails.
    if [ ! -d MagiCompiler ]; then
      git clone https://github.com/SandAI-org/MagiCompiler.git
      ( cd MagiCompiler && pip install -r requirements.txt && pip install . )
    fi

    cd daVinci-MagiHuman

    # Single-node, single-GPU distributed settings.
    export MASTER_ADDR=localhost
    export NNODES=1
    export NODE_RANK=0
    export GPUS_PER_NODE=1
    export WORLD_SIZE=1
    # Reduce CUDA allocator fragmentation; disable NVLS collectives.
    export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
    export NCCL_ALGO=^NVLS
    export PYTHONPATH=/workspace/daVinci-MagiHuman
    export CPU_OFFLOAD=true

    echo "=== Starting 540p inference test ==="
    echo "=== GPU Info ==="
    nvidia-smi --query-gpu=name,memory.total --format=csv,noheader

    # Rendezvous endpoint reuses MASTER_PORT (injected via docker -e) so the
    # two cannot drift apart.
    time torchrun --nnodes=1 --node_rank=0 --nproc_per_node=1 \
      --rdzv-backend=c10d --rdzv-endpoint="localhost:${MASTER_PORT}" \
      inference/pipeline/entry.py \
      --config-load-path example/sr_540p/config.json \
      --prompt "$(cat example/assets/prompt.txt)" \
      --image_path example/assets/image.png \
      --seconds 5 \
      --br_width 448 \
      --br_height 256 \
      --sr_width 896 \
      --sr_height 512 \
      --output_path /workspace/daVinci-MagiHuman/output_test_540p
  '
