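"""
Convert an original AuraFlow checkpoint hosted on the Hugging Face Hub into a
diffusers `AuraFlowTransformer2DModel`, save it locally, and optionally push it
to the Hub. The layer counts are inferred from the checkpoint itself rather
than hard-coded.
"""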
import argparse

import torch
from huggingface_hub import hf_hub_download

from diffusers.models.transformers.auraflow_transformer_2d import AuraFlowTransformer2DModel

def load_original_state_dict(args):
    model_pt = hf_hub_download(repo_id=args.original_state_dict_repo_id, filename="aura_diffusion_pytorch_model.bin")
    state_dict = torch.load(model_pt, map_location="cpu")
    return state_dict

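# Infer how many transformer blocks the checkpoint contains by collecting the
# distinct layer indices from keys such as "model.double_layers.<i>...".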
def calculate_layers(state_dict_keys, key_prefix):
    dit_layers = set()
    for k in state_dict_keys:
        if key_prefix in k:
            dit_layers.add(int(k.split(".")[2]))
    print(f"{key_prefix}: {len(dit_layers)}")
    return len(dit_layers)

# similar to SD3 but only for the last norm layer
def swap_scale_shift(weight, dim):
    # `dim` is unused; both the chunk and the concatenation operate on dim 0.
    shift, scale = weight.chunk(2, dim=0)
    new_weight = torch.cat([scale, shift], dim=0)
    return new_weight

def convert_transformer(state_dict):
    converted_state_dict = {}
    state_dict_keys = list(state_dict.keys())

    converted_state_dict["register_tokens"] = state_dict.pop("model.register_tokens")
    converted_state_dict["pos_embed.pos_embed"] = state_dict.pop("model.positional_encoding")
    converted_state_dict["pos_embed.proj.weight"] = state_dict.pop("model.init_x_linear.weight")
    converted_state_dict["pos_embed.proj.bias"] = state_dict.pop("model.init_x_linear.bias")

    converted_state_dict["time_step_proj.linear_1.weight"] = state_dict.pop("model.t_embedder.mlp.0.weight")
    converted_state_dict["time_step_proj.linear_1.bias"] = state_dict.pop("model.t_embedder.mlp.0.bias")
    converted_state_dict["time_step_proj.linear_2.weight"] = state_dict.pop("model.t_embedder.mlp.2.weight")
    converted_state_dict["time_step_proj.linear_2.bias"] = state_dict.pop("model.t_embedder.mlp.2.bias")

    converted_state_dict["context_embedder.weight"] = state_dict.pop("model.cond_seq_linear.weight")

    mmdit_layers = calculate_layers(state_dict_keys, key_prefix="double_layers")
    single_dit_layers = calculate_layers(state_dict_keys, key_prefix="single_layers")

    # MMDiT blocks 🎸.
    for i in range(mmdit_layers):
        # feed-forward
        path_mapping = {"mlpX": "ff", "mlpC": "ff_context"}
        weight_mapping = {"c_fc1": "linear_1", "c_fc2": "linear_2", "c_proj": "out_projection"}
        for orig_k, diffuser_k in path_mapping.items():
            for k, v in weight_mapping.items():
                converted_state_dict[f"joint_transformer_blocks.{i}.{diffuser_k}.{v}.weight"] = state_dict.pop(
                    f"model.double_layers.{i}.{orig_k}.{k}.weight"
                )

        # norms
        path_mapping = {"modX": "norm1", "modC": "norm1_context"}
        for orig_k, diffuser_k in path_mapping.items():
            converted_state_dict[f"joint_transformer_blocks.{i}.{diffuser_k}.linear.weight"] = state_dict.pop(
                f"model.double_layers.{i}.{orig_k}.1.weight"
            )

        # attns
        x_attn_mapping = {"w2q": "to_q", "w2k": "to_k", "w2v": "to_v", "w2o": "to_out.0"}
        context_attn_mapping = {"w1q": "add_q_proj", "w1k": "add_k_proj", "w1v": "add_v_proj", "w1o": "to_add_out"}
        for attn_mapping in [x_attn_mapping, context_attn_mapping]:
            for k, v in attn_mapping.items():
                converted_state_dict[f"joint_transformer_blocks.{i}.attn.{v}.weight"] = state_dict.pop(
                    f"model.double_layers.{i}.attn.{k}.weight"
                )

    # Single-DiT blocks.
    for i in range(single_dit_layers):
        # feed-forward
        mapping = {"c_fc1": "linear_1", "c_fc2": "linear_2", "c_proj": "out_projection"}
        for k, v in mapping.items():
            converted_state_dict[f"single_transformer_blocks.{i}.ff.{v}.weight"] = state_dict.pop(
                f"model.single_layers.{i}.mlp.{k}.weight"
            )

        # norms
        converted_state_dict[f"single_transformer_blocks.{i}.norm1.linear.weight"] = state_dict.pop(
            f"model.single_layers.{i}.modCX.1.weight"
        )

        # attns
        x_attn_mapping = {"w1q": "to_q", "w1k": "to_k", "w1v": "to_v", "w1o": "to_out.0"}
        for k, v in x_attn_mapping.items():
            converted_state_dict[f"single_transformer_blocks.{i}.attn.{v}.weight"] = state_dict.pop(
                f"model.single_layers.{i}.attn.{k}.weight"
            )

    # Final blocks.
    converted_state_dict["proj_out.weight"] = state_dict.pop("model.final_linear.weight")
    converted_state_dict["norm_out.linear.weight"] = swap_scale_shift(state_dict.pop("model.modF.1.weight"), dim=None)

    return converted_state_dict

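# Build the diffusers model with the inferred layer counts and load the
# converted weights with strict=True, so any unmapped key fails loudly.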
@torch.no_grad()
def populate_state_dict(args):
    original_state_dict = load_original_state_dict(args)
    state_dict_keys = list(original_state_dict.keys())
    mmdit_layers = calculate_layers(state_dict_keys, key_prefix="double_layers")
    single_dit_layers = calculate_layers(state_dict_keys, key_prefix="single_layers")

    converted_state_dict = convert_transformer(original_state_dict)
    model_diffusers = AuraFlowTransformer2DModel(
        num_mmdit_layers=mmdit_layers, num_single_dit_layers=single_dit_layers
    )
    model_diffusers.load_state_dict(converted_state_dict, strict=True)
    return model_diffusers

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--original_state_dict_repo_id", default="AuraDiffusion/auradiffusion-v0.1a0", type=str)
    parser.add_argument("--dump_path", default="aura-flow", type=str)
    parser.add_argument("--hub_id", default=None, type=str)
    args = parser.parse_args()

    model_diffusers = populate_state_dict(args)
    model_diffusers.save_pretrained(args.dump_path)

    if args.hub_id is not None:
        model_diffusers.push_to_hub(args.hub_id)
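
# Example invocation (a sketch; the flags and defaults are the ones defined above):
#
#   python convert_aura_flow_to_diffusers.py \
#       --original_state_dict_repo_id AuraDiffusion/auradiffusion-v0.1a0 \
#       --dump_path aura-flow
#
# The converted transformer can then be reloaded with diffusers' standard API:
#
#   from diffusers import AuraFlowTransformer2DModel
#   transformer = AuraFlowTransformer2DModel.from_pretrained("aura-flow")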