sailor2-3b-chat / runner.sh
yusufs's picture
feat(runner.sh): use runner.sh to select the LLM at run time
69c6372
raw
history blame
989 Bytes
#!/bin/bash
# Launcher: validates the MODEL_NAME / MODEL_REV environment variables, then
# starts the vLLM OpenAI-compatible API server on port 7860 with that model.
#
# Required env vars:
#   MODEL_NAME - one of the supported Hub model IDs (validated below)
#   MODEL_REV  - model revision (git ref) to load from the Hub
set -euo pipefail

# MODEL_NAME must be set and non-empty (use ${VAR:-} so 'set -u' doesn't
# abort before we can print a helpful message).
if [[ -z "${MODEL_NAME:-}" ]]; then
  echo "Error: MODEL_NAME is not set." >&2
  exit 1
fi
# Only the two models baked into this image are valid.
if [[ "$MODEL_NAME" != "meta-llama/Llama-3.2-3B-Instruct" && "$MODEL_NAME" != "sail/Sailor-4B-Chat" ]]; then
  {
    echo "Error: Invalid value for MODEL_NAME. Valid values are:"
    echo " - meta-llama/Llama-3.2-3B-Instruct"
    echo " - sail/Sailor-4B-Chat"
  } >&2
  exit 1
fi
# MODEL_REV must also be set and non-empty.
if [[ -z "${MODEL_REV:-}" ]]; then
  echo "Error: MODEL_REV is not set." >&2
  exit 1
fi

printf "Running %s using vLLM OpenAI compatible API Server at port %s\n" "$MODEL_NAME" "7860"

# exec replaces this shell with the server process so container signals
# (e.g. SIGTERM on shutdown) are delivered directly to Python.
exec python -u /app/openai_compatible_api_server.py \
  --model "${MODEL_NAME}" \
  --revision "${MODEL_REV}" \
  --seed 42 \
  --host 0.0.0.0 \
  --port 7860 \
  --max-num-batched-tokens 32768 \
  --max-model-len 32768 \
  --dtype float16 \
  --enforce-eager \
  --gpu-memory-utilization 0.9