#!/bin/bash
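# Launch distributed inference for a Qwen-style checkpoint on Ascend NPUs
# (single node, 8-way tensor parallel by default).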

# environment settings: adjust the library paths to match your environment
export LD_LIBRARY_PATH=/usr/local/lib:/root/miniconda3/lib:$LD_LIBRARY_PATH
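# timeout (in seconds) for establishing HCCL collective-communication connections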
export HCCL_CONNECT_TIMEOUT=1200
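# optimization switch used by torch_npu on Ascend NPUs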
export COMBINED_ENABLE=1
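# Megatron-style launchers set this to 1 so device kernels are issued in order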
export CUDA_DEVICE_MAX_CONNECTIONS=1

# please fill in these path configurations
CHECKPOINT="your model directory path"
TOKENIZER_PATH="your tokenizer path"

# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6001
NNODES=1
NODE_RANK=0
NPUS_PER_NODE=8
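# total number of NPUs across all nodes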
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
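
# arguments for the torchrun launcher: one process per NPU on this node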
DISTRIBUTED_ARGS="--nproc_per_node $NPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
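
# Launch the inference entry point. The model flags below (80 layers, hidden size 8192,
# 64 attention heads, SwiGLU FFN of 24576) must match the checkpoint loaded from ${CHECKPOINT}.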
torchrun $DISTRIBUTED_ARGS inference_qwen.py \
    --tensor-model-parallel-size 8 \
    --pipeline-model-parallel-size 1 \
    --num-layers 80 \
    --hidden-size 8192 \
    --num-attention-heads 64 \
    --ffn-hidden-size 24576 \
    --max-position-embeddings 32768 \
    --seq-length 8192 \
    --make-vocab-size-divisible-by 64 \
    --untie-embeddings-and-output-weights \
    --micro-batch-size 1 \
    --swiglu \
    --disable-bias-linear \
    --tokenizer-type PretrainedFromHF \
    --tokenizer-name-or-path ${TOKENIZER_PATH} \
    --load ${CHECKPOINT} \
    --normalization RMSNorm \
    --position-embedding-type rope \
    --norm-epsilon 1e-6 \
    --hidden-dropout 0 \
    --attention-dropout 0 \
    --tokenizer-not-use-fast \
    --add-qkv-bias \
    --rotary-base 1000000 \
    --tokenizer-kwargs 'eos_token' '<|endoftext|>' 'pad_token' '<|extra_0|>' \
    --max-new-tokens 256 \
    --seed 42 \
    --bf16