from transformers import Qwen2_5_VLForConditionalGeneration
import torch
import os
import sys

# Use the path from the workspace info
model_path = "/data/shuimu.chen/Qwen2.5-VL-Instruct"
print(f"Checking model at {model_path}")
if not os.path.exists(model_path):
    print(f"Model path does not exist: {model_path}")
    sys.exit(1)
try:
    # Load the weights on CPU (device_map="cpu") so the check does not need GPU
    # memory; torch_dtype="auto" keeps the checkpoint's native dtype.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_path,
        trust_remote_code=True,
        device_map="cpu",
        torch_dtype="auto",
    )
    print("Model loaded successfully.")
    print("Parameter names:")
    for name, _ in model.named_parameters():
        if "visual" in name or "patch" in name:
            print(name)
except Exception as e:
    print(f"Error loading model: {e}")