ehartford committed
Commit 478ce39 (verified) · Parent(s): 1c27314

Create minimax_to_bf16.py

Files changed (1): minimax_to_bf16.py (+142, -0)
minimax_to_bf16.py ADDED
@@ -0,0 +1,142 @@
import os
import json
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm

import torch
from safetensors.torch import load_file, save_file


def weight_dequant_fp8(weight_fp8, scale_inv):
    """
    Dequantize FP8 weights to BF16 using scale_inv.

    Args:
        weight_fp8: FP8 tensor
        scale_inv: Inverse scale tensor (F32)

    Returns:
        BF16 tensor
    """
    # Convert FP8 to float32 first
    weight_f32 = weight_fp8.to(torch.float32)

    # Apply inverse scaling
    # scale_inv shape is typically [out_features_blocks, in_features_blocks]
    # We need to broadcast it properly to match weight dimensions
    if scale_inv.dim() == 2:
        # Expand scale_inv to match weight dimensions
        out_blocks, in_blocks = scale_inv.shape
        weight_blocks_out = weight_fp8.shape[0] // out_blocks
        weight_blocks_in = weight_fp8.shape[1] // in_blocks

        # Repeat scale_inv to match weight shape
        scale_inv_expanded = scale_inv.repeat_interleave(weight_blocks_out, dim=0)
        scale_inv_expanded = scale_inv_expanded.repeat_interleave(weight_blocks_in, dim=1)

        weight_f32 = weight_f32 * scale_inv_expanded
    else:
        weight_f32 = weight_f32 * scale_inv

    # Convert to BF16
    return weight_f32.to(torch.bfloat16)


def main(fp8_path, bf16_path):
    torch.set_default_dtype(torch.bfloat16)
    os.makedirs(bf16_path, exist_ok=True)

    model_index_file = os.path.join(fp8_path, "model.safetensors.index.json")
    with open(model_index_file, "r") as f:
        model_index = json.load(f)

    weight_map = model_index["weight_map"]

    # Cache for loaded safetensor files
    loaded_files = {}
    fp8_weight_names = []

    # Helper function to get tensor from the correct file
    def get_tensor(tensor_name):
        if tensor_name not in weight_map:
            return None
        file_name = weight_map[tensor_name]
        if file_name not in loaded_files:
            file_path = os.path.join(fp8_path, file_name)
            loaded_files[file_name] = load_file(file_path, device="cuda")
        return loaded_files[file_name][tensor_name]

    safetensor_files = list(glob(os.path.join(fp8_path, "*.safetensors")))
    safetensor_files = [f for f in safetensor_files if not f.endswith(".index.json")]
    safetensor_files.sort()

    print(f"Found {len(safetensor_files)} safetensor files to convert")

    for safetensor_file in tqdm(safetensor_files, desc="Converting files"):
        file_name = os.path.basename(safetensor_file)
        current_state_dict = load_file(safetensor_file, device="cuda")
        loaded_files[file_name] = current_state_dict

        new_state_dict = {}

        for weight_name, weight in current_state_dict.items():
            # Skip scale_inv tensors
            if weight_name.endswith("_scale_inv"):
                continue

            # Check if this is an FP8 weight (F8_E4M3 has element_size of 1)
            if weight.dtype == torch.float8_e4m3fn or weight.element_size() == 1:
                scale_inv_name = f"{weight_name}_scale_inv"
                scale_inv = get_tensor(scale_inv_name)

                if scale_inv is not None:
                    fp8_weight_names.append(weight_name)
                    new_state_dict[weight_name] = weight_dequant_fp8(weight, scale_inv)
                else:
                    print(f"Warning: Missing scale_inv tensor for {weight_name}, keeping as-is")
                    new_state_dict[weight_name] = weight
            else:
                # Already BF16 or F32, keep as-is
                new_state_dict[weight_name] = weight

        # Save converted weights
        new_safetensor_file = os.path.join(bf16_path, file_name)
        save_file(new_state_dict, new_safetensor_file)

        # Memory management: keep only the 2 most recently used files
        if len(loaded_files) > 2:
            oldest_file = next(iter(loaded_files))
            del loaded_files[oldest_file]
            torch.cuda.empty_cache()

    # Update model index - remove all _scale_inv entries
    print("Updating model index...")
    new_weight_map = {}
    for weight_name, file_name in weight_map.items():
        if not weight_name.endswith("_scale_inv"):
            new_weight_map[weight_name] = file_name

    new_model_index = {
        "metadata": model_index.get("metadata", {}),
        "weight_map": new_weight_map
    }

    new_model_index_file = os.path.join(bf16_path, "model.safetensors.index.json")
    with open(new_model_index_file, "w") as f:
        json.dump(new_model_index, f, indent=2)

    print(f"Conversion complete! Converted {len(fp8_weight_names)} FP8 weights to BF16")
    print(f"Output saved to: {bf16_path}")


if __name__ == "__main__":
    parser = ArgumentParser(description="Convert MiniMax-M2 from FP8 to BF16")
    parser.add_argument("--input-fp8-hf-path", type=str, required=True,
                        help="Path to the FP8 model directory")
    parser.add_argument("--output-bf16-hf-path", type=str, required=True,
                        help="Path to save the BF16 model")
    args = parser.parse_args()

    main(args.input_fp8_hf_path, args.output_bf16_hf_path)
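
As a sanity check on the block-wise dequantization in weight_dequant_fp8, the scale expansion can be reproduced in isolation. The sketch below uses toy values (a 4x4 weight, a 2x2 scale_inv, and 2x2 blocks) with a float32 tensor standing in for the FP8 weight; none of these numbers come from the actual checkpoint.

# Toy illustration of the block-wise scale expansion in weight_dequant_fp8.
# Shapes and values are made up for illustration only.
import torch

weight = torch.full((4, 4), 0.5, dtype=torch.float32)          # stand-in for the FP8 weight
scale_inv = torch.tensor([[2.0, 4.0],
                          [8.0, 16.0]], dtype=torch.float32)   # one scale per 2x2 block

# Tile each block scale over its 2x2 region, apply it, then cast to BF16.
expanded = scale_inv.repeat_interleave(2, dim=0).repeat_interleave(2, dim=1)
dequant = (weight * expanded).to(torch.bfloat16)

print(expanded)  # [[2, 2, 4, 4], [2, 2, 4, 4], [8, 8, 16, 16], [8, 8, 16, 16]]
print(dequant)   # top-left block -> 1.0, bottom-right block -> 8.0

The script itself takes the two required flags defined in its argument parser and would typically be invoked as: python minimax_to_bf16.py --input-fp8-hf-path <fp8 checkpoint dir> --output-bf16-hf-path <bf16 output dir>, where both paths are placeholders for local directories.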