#!/bin/bash
# scripts/train_no_vllm_debug.sh: single-node debug training launch (no vLLM)
DATETIME=$(date '+%Y-%m-%d-%H')
RUN_NAME="timesearch-r-retrain"
OUTPUT_DIR=/data/shuimu.chen/TimeSearch-R/experiment/$RUN_NAME/$DATETIME
mkdir -p "$OUTPUT_DIR"
export WANDB_PROJECT=TimeSearch-R-ColdStart
export WANDB_NAME=$RUN_NAME
export LOG_PATH=${OUTPUT_DIR}/log.txt
export DEBUG=true
# Six devices are exposed here, but torchrun below launches only NUM_GPUS
# workers, so this debug run uses just the first two visible GPUs.
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,6
# export TOKENIZERS_PARALLELISM=false
# export OMP_NUM_THREADS=2
# export MKL_NUM_THREADS=2
export PYTHONPATH=".:$PYTHONPATH"
export SIGLIP_URL=grpc://127.0.0.1:51000
# export LLM_AS_A_JUDGE_BASE=http://127.0.0.1:18901/v1
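# Optional pre-flight sketch, assuming netcat (nc) is installed and a SigLIP
# reward server should already be serving at SIGLIP_URL: warn early if nothing
# is listening on the gRPC port instead of failing mid-training.
SIGLIP_PORT="${SIGLIP_URL##*:}"
if ! nc -z 127.0.0.1 "$SIGLIP_PORT" 2>/dev/null; then
    echo "WARNING: no SigLIP server reachable at ${SIGLIP_URL}" >&2
fi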
# Local training configuration
NUM_GPUS=2
MASTER_PORT=29500
echo "Local training mode: ${NUM_GPUS} GPUs on localhost:${MASTER_PORT}"
TRAIN_PATH=configs/dataset.yaml
VIDEO_ROOT=/data/shuimu.chen/LongVideoBench/LongVideoHaystack/videos
# MODEL_BASE=/data/shuimu.chen/Qwen2.5-VL-Instruct
MODEL_BASE=/data/shuimu.chen/Qwen2.5-VL-3B-Instruct
# MODEL_BASE=/data/shuimu.chen/Video-R1/Qwen2.5-VL_COT_SFT_offitial
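# Fail-fast sketch (all three paths are machine-specific): verify required
# inputs exist before spending time spinning up a multi-GPU job.
for REQUIRED_PATH in "$TRAIN_PATH" "$VIDEO_ROOT" "$MODEL_BASE"; do
    if [[ ! -e "$REQUIRED_PATH" ]]; then
        echo "ERROR: required path not found: ${REQUIRED_PATH}" >&2
        exit 1
    fi
done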
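# This is the no-vLLM debug variant: use_vllm is set to false below, so the
# vllm_mode and vllm_gpu_memory_utilization flags are carried along but
# should take no effect while use_vllm is false.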
torchrun --nproc_per_node=${NUM_GPUS} --nnodes=1 --node_rank=0 \
--master_addr=localhost --master_port=${MASTER_PORT} \
time_r1/train.py \
--deepspeed scripts/zero3.json \
--output_dir "$OUTPUT_DIR" \
--model_name_or_path "$MODEL_BASE" \
--train_data_path "$TRAIN_PATH" \
--video_folder "$VIDEO_ROOT" \
--reward_func v8 \
--prompt_template v4 \
--tool_name_list seek_video_frames \
--max_interaction_turns 8 \
--max_prompt_length 18000 \
--max_completion_length 16000 \
--max_completion_length_per_turn 256 \
--total_video_tokens 10240 \
--max_frames 30 \
--min_per_frame_tokens 12 \
--max_per_frame_tokens 128 \
--num_generations 4 \
--scale_rewards false \
--beta 0.005 \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 2 \
--steps_per_generation 1 \
--dataloader_num_workers 2 \
--logging_steps 1 \
--bf16 \
--torch_dtype bfloat16 \
--data_seed 42 \
--gradient_checkpointing true \
--attn_implementation flash_attention_2 \
--num_train_epochs 2 \
--run_name "$RUN_NAME" \
--report_to wandb \
--save_steps 200 \
--save_only_model true \
--use_vllm false \
--vllm_mode colocate \
--vllm_gpu_memory_utilization 0.8 \
--shuffle_dataset true \
--replay_buffer_type dapo \
--use_counterfactual_reasoning true
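# Propagate torchrun's exit status so callers can detect failed debug runs.
STATUS=$?
echo "Training finished with exit code ${STATUS}; log: ${LOG_PATH}"
exit "$STATUS"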