#!/bin/bash
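# Point the Hugging Face cache at the /data volume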
export HF_HOME="/data/.huggingface"
echo "PWD: $(pwd)"
echo "$HF_TOKEN" > .hf_token
echo "LS: $(ls -als)"
while true; do nvidia-smi; sleep 600; done &
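# Sanity-check that PyTorch can see the CUDA device(s)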
python3 -c "import torch; \
print(f\"is availeble = {torch.cuda.is_available()}\"); \
print(f\"device count = {torch.cuda.device_count()}\"); \
print(f\"current device = {torch.cuda.current_device()}\")"
python3 -m serve.controller --host 0.0.0.0 --port 10000 &
P1=$!
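# Give the controller time to come up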
sleep 30
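# Launch the Gradio web UI, pointed at the local controller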
python3 -m serve.gradio_web_server --controller http://127.0.0.1:10000 --model-list-mode reload & # --share
P2=$!
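# Launch the interactive demo worker serving prism-dinosiglip+7b on port 40000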
python3 -m interactive_demo --port 40000 --model_id prism-dinosiglip+7b &
P3=$!
# python -m interactive_demo --port 40001 --model_family llava-v15 --model_id llava-v1.5-7b --model_dir liuhaotian/llava-v1.5-7b &
# P4=$!
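# Show the contents and disk usage of the Hugging Face cache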
ls -als $HF_HOME
tree --du -h $HF_HOME
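# Block on the controller, web server, and worker so the script keeps running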
wait $P1 $P2 $P3
# $P4