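"""Convert a MiniMax-M2 Hugging Face checkpoint from FP8 to BF16.

Reads the safetensors shards listed in model.safetensors.index.json under
the input path, dequantizes each FP8 weight with its matching *_scale_inv
tensor, and writes BF16 shards plus a cleaned index to the output path.
"""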
import os
import json
from argparse import ArgumentParser
from glob import glob

import torch
from safetensors.torch import load_file, save_file
from tqdm import tqdm

def weight_dequant_fp8(weight_fp8, scale_inv):
    """
    Dequantize FP8 weights to BF16 using scale_inv.

    Args:
        weight_fp8: FP8 tensor.
        scale_inv: Inverse scale tensor (float32). A 2-D tensor carries one
            scale per quantization block; anything else is applied directly
            (e.g. a per-tensor scalar).

    Returns:
        BF16 tensor with the same shape as weight_fp8.
    """
    weight_f32 = weight_fp8.to(torch.float32)

    if scale_inv.dim() == 2:
        # Block-wise quantization: scale_inv holds one scale per block of
        # the weight matrix, so derive the block size along each dimension.
        out_blocks, in_blocks = scale_inv.shape
        block_size_out = weight_fp8.shape[0] // out_blocks
        block_size_in = weight_fp8.shape[1] // in_blocks

        # Expand each per-block scale to cover its full block, then apply
        # it element-wise.
        scale_inv_expanded = scale_inv.repeat_interleave(block_size_out, dim=0)
        scale_inv_expanded = scale_inv_expanded.repeat_interleave(block_size_in, dim=1)

        weight_f32 = weight_f32 * scale_inv_expanded
    else:
        # Per-tensor (scalar or broadcastable) scale.
        weight_f32 = weight_f32 * scale_inv

    return weight_f32.to(torch.bfloat16)
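
# Worked shape example (illustrative numbers; the real block size is derived
# from the tensor shapes above): a (256, 384) FP8 weight with a (2, 3)
# scale_inv implies 128x128 blocks, and the two repeat_interleave calls
# expand scale_inv back to (256, 384), so every element of a block is
# multiplied by that block's scale.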

def main(fp8_path, bf16_path):
    torch.set_default_dtype(torch.bfloat16)
    os.makedirs(bf16_path, exist_ok=True)

    # Load the weight-name -> shard-file mapping from the source index.
    model_index_file = os.path.join(fp8_path, "model.safetensors.index.json")
    with open(model_index_file, "r") as f:
        model_index = json.load(f)

    weight_map = model_index["weight_map"]

    # Cache of shard files currently resident on the GPU, and a record of
    # which weights were actually dequantized.
    loaded_files = {}
    fp8_weight_names = []

    def get_tensor(tensor_name):
        """Fetch a tensor by name, loading (and caching) its shard on demand."""
        if tensor_name not in weight_map:
            return None
        file_name = weight_map[tensor_name]
        if file_name not in loaded_files:
            file_path = os.path.join(fp8_path, file_name)
            loaded_files[file_name] = load_file(file_path, device="cuda")
        return loaded_files[file_name][tensor_name]

    # Collect the FP8 shards to convert. The "*.safetensors" pattern cannot
    # match the index file, so no extra filtering is needed.
    safetensor_files = sorted(glob(os.path.join(fp8_path, "*.safetensors")))

    print(f"Found {len(safetensor_files)} safetensor files to convert")

    for safetensor_file in tqdm(safetensor_files, desc="Converting files"):
        file_name = os.path.basename(safetensor_file)
        # Shards are loaded straight onto the GPU, so a CUDA device is required.
        current_state_dict = load_file(safetensor_file, device="cuda")
        loaded_files[file_name] = current_state_dict

        new_state_dict = {}

        for weight_name, weight in current_state_dict.items():
            # Scale tensors are consumed alongside their weights and are not
            # copied into the BF16 checkpoint.
            if weight_name.endswith("_scale_inv"):
                continue

            # Detect FP8 weights by dtype, with the 1-byte element size as a
            # coarse fallback for other FP8 formats.
            if weight.dtype == torch.float8_e4m3fn or weight.element_size() == 1:
                scale_inv_name = f"{weight_name}_scale_inv"
                scale_inv = get_tensor(scale_inv_name)

                if scale_inv is not None:
                    fp8_weight_names.append(weight_name)
                    new_state_dict[weight_name] = weight_dequant_fp8(weight, scale_inv)
                else:
                    print(f"Warning: Missing scale_inv tensor for {weight_name}, keeping as-is")
                    new_state_dict[weight_name] = weight
            else:
                # Non-FP8 tensors pass through unchanged.
                new_state_dict[weight_name] = weight

        new_safetensor_file = os.path.join(bf16_path, file_name)
        save_file(new_state_dict, new_safetensor_file)

        # Keep at most two shards cached at once: evict the oldest entry
        # (dicts preserve insertion order) and release its GPU memory.
        if len(loaded_files) > 2:
            oldest_file = next(iter(loaded_files))
            del loaded_files[oldest_file]
            torch.cuda.empty_cache()

    # Rewrite the index without the now-consumed *_scale_inv entries.
    print("Updating model index...")
    new_weight_map = {}
    for weight_name, file_name in weight_map.items():
        if not weight_name.endswith("_scale_inv"):
            new_weight_map[weight_name] = file_name

    new_model_index = {
        "metadata": model_index.get("metadata", {}),
        "weight_map": new_weight_map,
    }

    new_model_index_file = os.path.join(bf16_path, "model.safetensors.index.json")
    with open(new_model_index_file, "w") as f:
        json.dump(new_model_index, f, indent=2)

    print(f"Conversion complete! Converted {len(fp8_weight_names)} FP8 weights to BF16")
    print(f"Output saved to: {bf16_path}")

if __name__ == "__main__":
    parser = ArgumentParser(description="Convert MiniMax-M2 from FP8 to BF16")
    parser.add_argument("--input-fp8-hf-path", type=str, required=True,
                        help="Path to the FP8 model directory")
    parser.add_argument("--output-bf16-hf-path", type=str, required=True,
                        help="Path to save the BF16 model")
    args = parser.parse_args()

    main(args.input_fp8_hf_path, args.output_bf16_hf_path)
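
# Example invocation (script name and paths are illustrative):
#   python fp8_cast_bf16.py \
#       --input-fp8-hf-path /models/MiniMax-M2-FP8 \
#       --output-bf16-hf-path /models/MiniMax-M2-BF16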