File size: 945 Bytes
bbaf249
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
from transformers import Qwen2_5_VLForConditionalGeneration
import torch
import os

# Diagnostic script: load a local Qwen2.5-VL checkpoint on CPU and dump
# parameter names, to locate the embedding table ("embed_tokens") in the
# module tree.

# Path to the local checkpoint (adjust for your workspace).
model_path = "/data/shuimu.chen/Qwen2.5-VL-Instruct"

print(f"Checking model at {model_path}")
if not os.path.exists(model_path):
    print(f"Model path does not exist: {model_path}")
    # exit(1) relies on the site-injected builtin; SystemExit works everywhere.
    raise SystemExit(1)

try:
    # Load fully on CPU with the checkpoint's native dtype ("auto") so no GPU
    # memory is needed. NOTE: weights ARE materialized in RAM here — this is
    # not a meta-device (weightless) load, despite being OOM-safe w.r.t. GPUs.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_path,
        trust_remote_code=True,
        device_map="cpu",
        torch_dtype="auto",
    )
    print("Model loaded successfully.")
    print("First 20 Parameter names:")
    for i, (name, _) in enumerate(model.named_parameters()):
        print(name)
        # i is 0-based: stop after printing the 20th name (i == 19).
        # The original `if i > 20` printed 22 names under a "First 20" heading.
        if i >= 19:
            break

    print("\nSearching for embed_tokens:")
    for name, _ in model.named_parameters():
        if "embed_tokens" in name:
            print(name)

except Exception as e:
    # Best-effort diagnostic: report the failure instead of dying with a
    # traceback (loading can fail on missing/corrupt shards, dtype issues, etc.).
    print(f"Error loading model: {e}")