# Copyright 2025 Bytedance Ltd. and/or its affiliates.
# Copyright 2025 AntGroup and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
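"""Unified pre-training entry point for LLaDA-o with NaViT-style packed batches.

The script wires together a LLaDA language model, an optional SigLIP ViT for
image understanding, an optional VAE (plus a DINOv3 encoder for the REPA loss)
for image generation, FSDP sharding with activation checkpointing, an EMA copy
of the weights, and a token-packed dataloader. The argument dataclasses below
document every knob.
"""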
import functools
import os
import wandb
import yaml
from copy import deepcopy
from dataclasses import dataclass, field
from time import time
import itertools
import torch
import torch.distributed as dist
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
from torch.utils.data import DataLoader
from transformers import HfArgumentParser, set_seed
from transformers.optimization import (
get_constant_schedule_with_warmup,
get_cosine_with_min_lr_schedule_with_warmup,
)
from data.dataset_base import DataConfig, PackedDataset, collate_wrapper
from data.data_utils import add_special_tokens
from modeling.autoencoder import load_ae
from modeling.lladao import (
SiglipVisionConfig, SiglipVisionModel, LLaDAO, LLaDAOConfig, LLaDAConfig, LLaDAModelLM
)
from transformers import AutoTokenizer, DINOv3ViTModel
from train.train_utils import create_logger, get_latest_ckpt
from train.fsdp_utils import (
FSDPCheckpoint, FSDPConfig, grad_checkpoint_check_fn, fsdp_wrapper,
fsdp_ema_setup, fsdp_ema_update,
)
@dataclass
class ModelArguments:
model_path: str = field(
default="GSAI-ML/LLaDA-o",
metadata={"help": "Path of the pretrained LLaDA-o model."}
)
llm_path: str = field(
default="",
metadata={"help": "Path of the pretrained language model."}
)
llm_qk_norm: bool = field(
default=True,
metadata={"help": "Enable QK LayerNorm (qk_norm) inside the attention blocks."}
)
tie_word_embeddings: bool = field(
default=False,
metadata={"help": "Share input and output word embeddings (tied embeddings)."}
)
layer_module: str = field(
default="LLaDAMoTDecoderLayer",
metadata={"help": "Python class name of the decoder layer to instantiate."}
)
vae_path: str = field(
default="flux/vae/ae.safetensors",
metadata={"help": "Path to the pretrained VAE checkpoint for latent-space image generation."}
)
vit_path: str = field(
default="hf/siglip-so400m-14-980-flash-attn2-navit/",
metadata={"help": "Path or repo ID of the SigLIP Vision Transformer used for image understanding."}
)
dino_path: str = field(
default="hf/dinov3-vitb16-pretrain-lvd1689m/", # dinov3-vitb16-pretrain-lvd1689m, dinov3-vitl16-pretrain-lvd1689m, dinov3-vith16plus-pretrain-lvd1689m
metadata={"help": "Path of the DINOv3 Vision Transformer used for image generation."}
)
max_latent_size: int = field(
default=32,
metadata={"help": "Maximum latent grid size (patches per side) for the VAE latent tensor."}
)
latent_patch_size: int = field(
default=2,
metadata={"help": "Spatial size (in VAE pixels) covered by each latent patch."}
)
vit_patch_size: int = field(
default=14,
metadata={"help": "Patch size (pixels) for the Vision Transformer encoder."}
)
vit_max_num_patch_per_side: int = field(
default=70,
metadata={"help": "Maximum number of ViT patches along one image side after cropping / resize."}
)
connector_act: str = field(
default="gelu_pytorch_tanh",
metadata={"help": "Activation function used in the latent-to-text connector MLP."}
)
interpolate_pos: bool = field(
default=False,
metadata={"help": "Interpolate positional embeddings when image resolution differs from pre-training."}
)
vit_select_layer: int = field(
default=-2,
metadata={"help": "Which hidden layer of the ViT to take as the visual feature (negative = from the end)."}
)
vit_rope: bool = field(
default=False,
metadata={"help": "Replace ViT positional encodings with RoPE."}
)
text_cond_dropout_prob: float = field(
default=0.1,
metadata={"help": "Probability of dropping text embeddings during training."}
)
vae_cond_dropout_prob: float = field(
default=0.3,
metadata={"help": "Probability of dropping VAE latent inputs during training."}
)
vit_cond_dropout_prob: float = field(
default=0.3,
metadata={"help": "Probability of dropping ViT visual features during training."}
)
@dataclass
class DataArguments:
dataset_config_file: str = field(
default="data/configs/example.yaml",
metadata={"help": "YAML file specifying dataset groups, weights, and preprocessing rules."}
)
prefetch_factor: int = field(
default=2,
metadata={"help": "How many batches each DataLoader worker pre-loads in advance."}
)
num_workers: int = field(
default=4,
metadata={"help": "Number of background workers for the PyTorch DataLoader."}
)
max_num_tokens_per_sample: int = field(
default=16384,
metadata={"help": "Maximum tokens allowed in one raw sample; longer samples are skipped."}
)
max_num_tokens: int = field(
default=36864,
metadata={"help": "Hard limit on tokens in a packed batch; flush if adding a sample would exceed it."}
)
prefer_buffer_before: int = field(
default=16384,
metadata={"help": "While batch length is below this, pop from the overflow buffer before new sampling."}
)
max_buffer_size: int = field(
default=50,
metadata={"help": "Maximum number of oversized samples kept in the overflow buffer."}
)
data_seed: int = field(
default=42,
metadata={"help": "Seed used when shuffling / sampling data shards to ensure reproducibility."}
)
@dataclass
class TrainingArguments:
# --- modality switches ---
visual_gen: bool = field(
default=True,
metadata={"help": "Train image generation branch."}
)
visual_und: bool = field(
default=True,
metadata={"help": "Train image understanding branch."}
)
visual_und_sft: bool = field(
default=False,
metadata={"help": "Train image understanding branch with multimodal masked prediction sft."} # If set to True, subsequent conversations should not see the noisy version of previous conversations, and randomly add some eos tokens as prediction targets.
)
ada_len: bool = field(
default=False,
metadata={"help": "Use adaptive length training."} # If set to True, the masked response may be padded or truncated during training to achieve adaptive length.
)
ada_len_split: bool = field(
default=False,
metadata={"help": "Use adaptive length training with split."}
)
visual_und_always_mask_last: bool = field(
default=False,
metadata={"help": "Always mask last token in image understanding and text branch."}
)
merge_vit_text_segments: bool = field(
default=False,
metadata={"help": "If merge all vit and text segments into one segment following LLaDA-V"}
)
loss_reduction: str = field(
default="square",
metadata={"help": "Loss reduction type."}
)
visual_gen_repa: bool = field(
default=False,
metadata={"help": "Train image generation branch with repa using dinov3."}
)
visual_gen_reg: bool = field(
default=False,
metadata={"help": "Train image generation branch with reg."} # when using reg, repa is True
)
reg_weight: float = field(
default=0.0,
metadata={"help": "Weight of reg loss."}
)
repa_output_depth: int = field(
default=0,
metadata={"help": "Output depth of repa."}
)
repa_weight: float = field(
default=0.0,
metadata={"help": "Weight of repa loss."}
)
# --- bookkeeping & logging ---
results_dir: str = field(
default="results",
metadata={"help": "Root directory for logs."}
)
checkpoint_dir: str = field(
default="results/checkpoints",
metadata={"help": "Root directory for model checkpoints."}
)
wandb_project: str = field(
default="lladao",
metadata={"help": "Weights & Biases project name."}
)
wandb_name: str = field(
default="run",
metadata={"help": "Name shown in the Weights & Biases UI for this run."}
)
wandb_runid: str = field(
default="0",
metadata={"help": "Unique identifier to resume a previous W&B run, if desired."}
)
wandb_resume: str = field(
default="allow",
metadata={"help": "W&B resume mode: 'allow', 'must', or 'never'."}
)
wandb_offline: bool = field(
default=False,
metadata={"help": "Run W&B in offline mode (logs locally, sync later)."}
)
wandb_log_dir: str = field(
default="./wandb_logs",
metadata={"help": "Directory to store wandb logs when running in offline mode."}
)
# --- reproducibility & resume ---
global_seed: int = field(
default=4396,
metadata={"help": "Base random seed; actual seed is offset by rank for DDP."}
)
auto_resume: bool = field(
default=False,
metadata={"help": "Automatically pick up the latest checkpoint found in checkpoint_dir."}
)
resume_from: str = field(
default=None,
metadata={"help": "Explicit checkpoint path to resume from (overrides auto_resume)." }
)
resume_model_only: bool = field(
default=False,
metadata={"help": "Load only model weights, ignoring optimizer/scheduler states."}
)
finetune_from_ema: bool = field(
default=False,
metadata={"help": "When resume_model_only=True, load the EMA (exponential moving average) weights instead of raw weights."}
)
finetune_from_hf: bool = field(
default=False,
metadata={"help": "Whether finetune from HugginFace model."}
)
# --- reporting frequency ---
log_every: int = field(
default=10,
metadata={"help": "Print / log every N training steps."}
)
save_every: int = field(
default=2000,
metadata={"help": "Save a checkpoint every N training steps."}
)
total_steps: int = field(
default=500_000,
metadata={"help": "Total number of optimizer steps to train for."}
)
# --- optimization & scheduler ---
warmup_steps: int = field(
default=2000,
metadata={"help": "Linear warm-up steps before applying the main LR schedule."}
)
lr_scheduler: str = field(
default="constant",
metadata={"help": "Type of LR schedule: 'constant' or 'cosine'."}
)
lr: float = field(
default=1e-4,
metadata={"help": "Peak learning rate after warm-up."}
)
min_lr: float = field(
default=1e-7,
metadata={"help": "Minimum learning rate for cosine schedule (ignored for constant)."}
)
beta1: float = field(
default=0.9,
metadata={"help": "AdamW beta1 coefficient."}
)
beta2: float = field(
default=0.95,
metadata={"help": "AdamW beta2 coefficient."}
)
eps: float = field(
default=1e-15,
metadata={"help": "AdamW epsilon for numerical stability."}
)
ema: float = field(
default=0.9999,
metadata={"help": "Decay rate for the exponential moving average of model weights."}
)
max_grad_norm: float = field(
default=1.0,
metadata={"help": "Gradient clipping threshold (L2 norm)."}
)
timestep_shift: float = field(
default=1.0,
metadata={"help": "Shift applied to diffusion timestep indices (for latent prediction)."}
)
mse_weight: float = field(
default=1.0,
metadata={"help": "Scaling factor for the image-reconstruction MSE loss term."}
)
ce_weight: float = field(
default=1.0,
metadata={"help": "Scaling factor for the language cross-entropy loss term."}
)
ce_loss_reweighting: bool = field(
default=False,
metadata={"help": "Reweight CE loss by token importance (provided via ce_loss_weights)."}
)
expected_num_tokens: int = field(
default=32768,
metadata={"help": "Soft target token count; yield the batch once it reaches or exceeds this size."}
)
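# Packing invariant (implied by the defaults): prefer_buffer_before (16384)
# <= expected_num_tokens (32768) <= max_num_tokens (36864); a batch is
# emitted once it reaches the soft target and never exceeds the hard limit.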
# --- distributed training / FSDP ---
num_replicate: int = field(
default=1,
metadata={"help": "Number of model replicas per GPU rank for tensor parallelism."}
)
num_shard: int = field(
default=8,
metadata={"help": "Number of parameter shards when using FSDP HYBRID_SHARD."}
)
sharding_strategy: str = field(
default="HYBRID_SHARD",
metadata={"help": "FSDP sharding strategy: FULL_SHARD, SHARD_GRAD_OP, HYBRID_SHARD, etc."}
)
backward_prefetch: str = field(
default="BACKWARD_PRE",
metadata={"help": "FSDP backward prefetch strategy (BACKWARD_PRE or NO_PREFETCH)."}
)
cpu_offload: bool = field(
default=False,
metadata={"help": "Enable FSDP parameter offload to CPU."}
)
# --- module freezing ---
freeze_llm: bool = field(
default=False,
metadata={"help": "Keep language-model weights fixed (no gradient updates)."}
)
freeze_vit: bool = field(
default=False,
metadata={"help": "Keep ViT weights fixed during training."}
)
freeze_vae: bool = field(
default=True,
metadata={"help": "Keep VAE weights fixed; only predict latents, don't fine-tune encoder/decoder."}
)
freeze_und: bool = field(
default=False,
metadata={"help": "Freeze the visual understanding connector layers."}
)
copy_init_moe: bool = field(
default=True,
metadata={"help": "Duplicate initial MoE experts so each has identical initialisation."}
)
use_flex: bool = field(
default=False,
metadata={"help": "Enable FLEX (flash-ext friendly) packing algorithm for sequence data."}
)
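# A minimal launch sketch (illustrative; flag names come from the dataclasses
# above via HfArgumentParser, and the launcher depends on your atorch setup --
# torchrun also works since init reads the usual distributed env variables):
#
#   torchrun --nproc_per_node=8 pretrain_unified_navit.py \
#       --llm_path /path/to/llada \
#       --vit_path hf/siglip-so400m-14-980-flash-attn2-navit/ \
#       --vae_path flux/vae/ae.safetensors \
#       --dataset_config_file data/configs/example.yaml \
#       --checkpoint_dir results/checkpoints \
#       --lr 1e-4 --total_steps 500000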
def main():
assert torch.cuda.is_available()
# Initialize the process group (atorch wraps the NCCL setup; the plain
# torch.distributed call below is the equivalent non-atorch path).
# dist.init_process_group(backend="nccl")
import atorch
status = atorch.init_distributed(backend="nccl")
assert status is True
device = dist.get_rank() % torch.cuda.device_count()
torch.cuda.set_device(device)
parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging:
if dist.get_rank() == 0:
os.makedirs(training_args.results_dir, exist_ok=True)
os.makedirs(training_args.checkpoint_dir, exist_ok=True)
logger = create_logger(training_args.results_dir, dist.get_rank())
if training_args.wandb_offline:
wandb_log_dir = getattr(training_args, 'wandb_log_dir', './wandb_logs')
os.makedirs(wandb_log_dir, exist_ok=True) # Ensure the directory exists.
print(f"Wandb offline logs will be saved to: {wandb_log_dir}")
wandb.init(
project=training_args.wandb_project,
id=f"{training_args.wandb_name}-run{training_args.wandb_runid}",
name=training_args.wandb_name,
resume=training_args.wandb_resume,
mode="offline" if training_args.wandb_offline else "online",
dir=training_args.wandb_log_dir if training_args.wandb_offline else None, # store logs locally when offline
settings=wandb.Settings(init_timeout=300)
)
wandb.config.update(training_args)
wandb.config.update(model_args)
wandb.config.update(data_args)
else:
logger = create_logger(None, dist.get_rank())
dist.barrier()
logger.info(f'Training arguments {training_args}')
logger.info(f'Model arguments {model_args}')
logger.info(f'Data arguments {data_args}')
# prepare auto resume logic:
if training_args.auto_resume:
resume_from = get_latest_ckpt(training_args.checkpoint_dir)
if resume_from is None:
resume_from = training_args.resume_from
resume_model_only = training_args.resume_model_only
if resume_model_only:
finetune_from_ema = training_args.finetune_from_ema
else:
finetune_from_ema = False
else:
resume_model_only = False
finetune_from_ema = False
else:
resume_from = training_args.resume_from
resume_model_only = training_args.resume_model_only
if resume_model_only:
finetune_from_ema = training_args.finetune_from_ema
else:
finetune_from_ema = False
# Set seed:
seed = training_args.global_seed * dist.get_world_size() + dist.get_rank()
set_seed(seed)
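# Each rank gets a distinct seed, e.g. with global_seed=4396 and world_size=8:
# rank 0 -> 35168, rank 1 -> 35169, ... so dropout and augmentation decorrelate
# across processes while the run stays reproducible.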
# Setup model:
if training_args.finetune_from_hf:
llm_config = LLaDAConfig.from_json_file(os.path.join(model_args.model_path, "llm_config.json"))
else:
llm_config = LLaDAConfig.from_pretrained(model_args.llm_path)
llm_config.layer_module = model_args.layer_module
llm_config.qk_norm = model_args.llm_qk_norm
llm_config.tie_word_embeddings = model_args.tie_word_embeddings
llm_config.freeze_und = training_args.freeze_und
if training_args.finetune_from_hf:
language_model = LLaDAModelLM(llm_config)
else:
language_model = LLaDAModelLM.from_pretrained(model_args.llm_path, config=llm_config)
if training_args.copy_init_moe:
language_model.init_moe()
if training_args.visual_und:
if training_args.finetune_from_hf:
vit_config = SiglipVisionConfig.from_json_file(os.path.join(model_args.model_path, "vit_config.json"))
else:
vit_config = SiglipVisionConfig.from_pretrained(model_args.vit_path)
vit_config.num_hidden_layers = vit_config.num_hidden_layers + 1 + model_args.vit_select_layer
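# Truncate the ViT so its last hidden state is the selected feature layer:
# with vit_select_layer=-2 this keeps num_hidden_layers - 1 layers, making the
# penultimate layer of the full model the output.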
vit_config.rope = model_args.vit_rope
if training_args.finetune_from_hf:
vit_model = SiglipVisionModel(vit_config)
else:
vit_model = SiglipVisionModel.from_pretrained(model_args.vit_path, config=vit_config)
if training_args.visual_gen:
vae_model, vae_config = load_ae(
local_path=os.path.join(model_args.model_path, "ae.safetensors")
if training_args.finetune_from_hf else model_args.vae_path
)
if not training_args.visual_gen:
training_args.visual_gen_repa = False # repa is meaningless without visual_gen
if training_args.visual_gen_reg:
training_args.visual_gen_repa = True # reg requires repa
if training_args.visual_gen_repa:
repa_model = DINOv3ViTModel.from_pretrained(model_args.dino_path)
repa_model.eval() # frozen feature extractor; disable dropout etc.
config = LLaDAOConfig(
visual_gen=training_args.visual_gen,
visual_und=training_args.visual_und,
visual_gen_repa=training_args.visual_gen_repa,
visual_gen_reg=training_args.visual_gen_reg,
repa_output_depth=training_args.repa_output_depth,
llm_config=llm_config,
vit_config=vit_config if training_args.visual_und else None,
vae_config=vae_config if training_args.visual_gen else None,
latent_patch_size=model_args.latent_patch_size,
max_latent_size=model_args.max_latent_size,
vit_max_num_patch_per_side=model_args.vit_max_num_patch_per_side,
connector_act=model_args.connector_act,
interpolate_pos=model_args.interpolate_pos,
timestep_shift=training_args.timestep_shift,
)
model = LLaDAO(
language_model,
vit_model if training_args.visual_und else None,
repa_model if training_args.visual_gen_repa else None,
config
)
if training_args.visual_und:
model.vit_model.vision_model.embeddings.convert_conv2d_to_linear(vit_config)
# Setup tokenizer for model:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_path if training_args.finetune_from_hf else model_args.llm_path)
tokenizer, new_token_ids, num_new_tokens = add_special_tokens(tokenizer)
if num_new_tokens > 0:
old_size = model.language_model.get_input_embeddings().num_embeddings
new_size = len(tokenizer)
if new_size > old_size:
# Expansion: safe
model.language_model.resize_token_embeddings(new_size)
model.config.llm_config.vocab_size = new_size
model.language_model.config.vocab_size = new_size
print(f"Expanding vocabulary: {old_size} -> {new_size}")
else:
# Do not shrink: only initialize rows corresponding to newly added tokens (they are < old_size at this point)
with torch.no_grad():
emb = model.language_model.get_input_embeddings()
std = emb.weight.std().item()
for tid in new_token_ids.values():
if tid < old_size:
emb.weight[tid].normal_(0.0, std)
print("Randomly initialize embeddings for new token IDs")
# maybe freeze something:
if training_args.freeze_vae and training_args.visual_gen:
for param in vae_model.parameters():
param.requires_grad = False
if training_args.freeze_llm:
model.language_model.eval()
for param in model.language_model.parameters():
param.requires_grad = False
if training_args.freeze_vit and training_args.visual_und:
model.vit_model.eval()
for param in model.vit_model.parameters():
param.requires_grad = False
if training_args.visual_gen_repa:
# the repa (DINOv3) model is a frozen feature extractor: freeze all parameters
model.repa_model.eval()
for param in model.repa_model.parameters():
param.requires_grad = False
# Setup FSDP and load pretrained model:
fsdp_config = FSDPConfig(
sharding_strategy=training_args.sharding_strategy,
backward_prefetch=training_args.backward_prefetch,
cpu_offload=training_args.cpu_offload,
num_replicate=training_args.num_replicate,
num_shard=training_args.num_shard,
)
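# Order matters below: the checkpoint is loaded into the plain (unwrapped)
# modules first, then the EMA copy and the training model are wrapped with
# FSDP, and activation checkpointing is applied on top of the wrapper.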
ema_model = deepcopy(model)
model, ema_model = FSDPCheckpoint.try_load_ckpt(
resume_from, logger, model, ema_model, resume_from_ema=finetune_from_ema
)
ema_model = fsdp_ema_setup(ema_model, fsdp_config)
fsdp_model = fsdp_wrapper(model, fsdp_config)
apply_activation_checkpointing(
fsdp_model,
checkpoint_wrapper_fn=functools.partial(
checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT
),
check_fn=grad_checkpoint_check_fn
)
if dist.get_rank() == 0:
print(fsdp_model)
for name, param in model.named_parameters():
print(name, param.requires_grad)
# Setup optimizer and scheduler
optimizer = torch.optim.AdamW(
fsdp_model.parameters(),
lr=training_args.lr,
betas=(training_args.beta1, training_args.beta2),
eps=training_args.eps,
weight_decay=0
)
if training_args.lr_scheduler == 'cosine':
scheduler = get_cosine_with_min_lr_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=training_args.warmup_steps,
num_training_steps=training_args.total_steps,
min_lr=training_args.min_lr,
)
elif training_args.lr_scheduler == 'constant':
scheduler = get_constant_schedule_with_warmup(
optimizer=optimizer, num_warmup_steps=training_args.warmup_steps
)
else:
raise ValueError(f"Unknown lr_scheduler: {training_args.lr_scheduler!r}")
# maybe resume optimizer, scheduler, and train_steps
if resume_model_only:
train_step = 0
data_status = None
else:
optimizer, scheduler, train_step, data_status = FSDPCheckpoint.try_load_train_state(
resume_from, optimizer, scheduler, fsdp_config,
)
# Setup packed dataloader
with open(data_args.dataset_config_file, "r") as stream:
dataset_meta = yaml.safe_load(stream)
dataset_config = DataConfig(grouped_datasets=dataset_meta)
if training_args.visual_und:
dataset_config.visual_und = True
dataset_config.vit_patch_size = model_args.vit_patch_size
dataset_config.max_num_patch_per_side = model_args.vit_max_num_patch_per_side
if training_args.visual_gen:
dataset_config.visual_gen = True
vae_image_downsample = model_args.latent_patch_size * vae_config.downsample
dataset_config.vae_image_downsample = vae_image_downsample
dataset_config.max_latent_size = model_args.max_latent_size
dataset_config.text_cond_dropout_prob = model_args.text_cond_dropout_prob
dataset_config.vae_cond_dropout_prob = model_args.vae_cond_dropout_prob
dataset_config.vit_cond_dropout_prob = model_args.vit_cond_dropout_prob
if training_args.visual_gen_reg:
dataset_config.visual_gen_reg = True
if training_args.visual_und_sft:
dataset_config.visual_und_sft = True
if training_args.ada_len:
dataset_config.ada_len = True
if training_args.ada_len_split:
dataset_config.ada_len_split = True
if training_args.visual_und_always_mask_last:
dataset_config.visual_und_always_mask_last = True
if training_args.merge_vit_text_segments:
dataset_config.merge_vit_text_segments = True
dataset_config.loss_reduction = training_args.loss_reduction
train_dataset = PackedDataset(
dataset_config,
tokenizer=tokenizer,
special_tokens=new_token_ids,
local_rank=dist.get_rank(),
world_size=dist.get_world_size(),
num_workers=data_args.num_workers,
expected_num_tokens=training_args.expected_num_tokens,
max_num_tokens_per_sample=data_args.max_num_tokens_per_sample,
max_num_tokens=data_args.max_num_tokens,
max_buffer_size=data_args.max_buffer_size,
prefer_buffer_before=data_args.prefer_buffer_before,
interpolate_pos=model_args.interpolate_pos,
use_flex=training_args.use_flex,
data_status=data_status,
)
train_dataset.set_epoch(data_args.data_seed)
train_loader = DataLoader(
train_dataset,
batch_size=1, # the dataset yields pre-packed batches, so DataLoader batch size is 1
num_workers=data_args.num_workers,
pin_memory=True,
collate_fn=collate_wrapper(),
drop_last=True,
prefetch_factor=data_args.prefetch_factor,
)
# Prepare models for training:
if training_args.visual_gen:
vae_model.to(device).eval()
fsdp_model.train()
ema_model.eval()
# train loop
start_time = time()
logger.info(f"Training for {training_args.total_steps} steps, starting at {train_step}...")
remaining_steps = training_args.total_steps - train_step
limited_loader = itertools.islice(train_loader, remaining_steps)
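# islice caps the stream at the remaining step budget, so the loop below ends
# exactly at total_steps even if the packed dataset can stream indefinitely.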
for curr_step, data in enumerate(limited_loader, start=train_step):
data = data.cuda(device).to_dict()
data_indexes = data.pop('batch_data_indexes', None)
ce_loss_weights = data.pop('ce_loss_weights', None)
with torch.amp.autocast("cuda", enabled=True, dtype=torch.bfloat16):
if training_args.visual_gen:
with torch.no_grad():
data['padded_latent'] = vae_model.encode(data['padded_images'])
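# The frozen VAE acts as a fixed latent tokenizer: encode targets under
# no_grad and let only the LLM learn to predict the resulting latents.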
loss_dict = fsdp_model(**data)
loss = 0
ce = loss_dict["ce"]
if ce is not None:
total_ce_tokens = torch.tensor(len(data['ce_loss_indexes']), device=device)
dist.all_reduce(total_ce_tokens, op=dist.ReduceOp.SUM)
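# Normalization note: gradients are averaged over ranks, so scaling the
# local CE sum by world_size / global_token_count yields a mean over all
# CE tokens in the global batch, independent of per-rank packing.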
if training_args.ce_loss_reweighting:
ce = ce * ce_loss_weights
total_ce_loss_weights = ce_loss_weights.sum()
dist.all_reduce(total_ce_loss_weights, op=dist.ReduceOp.SUM)
ce = ce.sum() * dist.get_world_size() / total_ce_loss_weights
else:
ce = ce.sum() * dist.get_world_size() / total_ce_tokens
loss_dict["ce"] = ce.detach()
loss = loss + ce * training_args.ce_weight
else:
# no CE tokens in this batch (e.g. pure image-generation data)
loss_dict["ce"] = torch.tensor(0, device=device)
total_ce_tokens = torch.tensor(0, device=device)
if training_args.visual_gen:
mse = loss_dict["mse"]
total_mse_tokens = torch.tensor(len(data['mse_loss_indexes']), device=device)
dist.all_reduce(total_mse_tokens, op=dist.ReduceOp.SUM)
mse = mse.mean(dim=-1).sum() * dist.get_world_size() / total_mse_tokens
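# Same world_size / global-token normalization as CE, after first averaging
# the MSE over the channel dimension of each latent token.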
loss_dict["mse"] = mse.detach()
loss = loss + mse * training_args.mse_weight
else:
assert not training_args.visual_gen
loss_dict["mse"] = torch.tensor(0, device=device)
total_mse_tokens = torch.tensor(0, device=device)
if training_args.visual_gen_reg:
reg = loss_dict["reg"]
loss_dict["reg"] = reg.detach()
loss = loss + reg * training_args.reg_weight
else:
assert not training_args.visual_gen_reg
loss_dict["reg"] = torch.tensor(0, device=device)
if training_args.visual_gen_repa:
repa = loss_dict["repa"]
loss_dict["repa"] = repa.detach()
loss = loss + repa * training_args.repa_weight
else:
assert not training_args.visual_gen_repa
loss_dict["repa"] = torch.tensor(0, device=device)
optimizer.zero_grad()
loss.backward()
total_norm = fsdp_model.clip_grad_norm_(training_args.max_grad_norm)
optimizer.step()
scheduler.step()
fsdp_ema_update(ema_model, fsdp_model, decay=training_args.ema)
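# The EMA copy trails the online weights (conventionally
# ema <- decay * ema + (1 - decay) * online, with decay=0.9999 by default).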
# Log loss values:
if curr_step % training_args.log_every == 0:
total_samples = torch.tensor(len(data['sample_lens']), device=device)
dist.all_reduce(total_samples, op=dist.ReduceOp.SUM)
# Measure training speed:
torch.cuda.synchronize()
end_time = time()
steps_per_sec = training_args.log_every / (end_time - start_time)
message = f"(step={curr_step:07d}) "
wandb_log = {}
for key, value in loss_dict.items():
# Reduce loss history over all processes:
avg_loss = torch.tensor(value.item(), device=device)
dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
avg_loss = avg_loss.item() / dist.get_world_size()
message += f"Train Loss {key}: {avg_loss:.4f}, "
wandb_log[key] = avg_loss
message += f"Train Steps/Sec: {steps_per_sec:.2f}, "
if dist.get_rank() == 0:
logger.info(message)
wandb_log['lr'] = optimizer.param_groups[0]['lr']
wandb_log['total_mse_tokens'] = total_mse_tokens.item()
wandb_log['total_ce_tokens'] = total_ce_tokens.item()
wandb_log['total_norm'] = total_norm.item()
wandb_log['total_samples'] = total_samples.item()
mem_allocated = torch.tensor(torch.cuda.max_memory_allocated() / 1024**2, device=device)
dist.all_reduce(mem_allocated, op=dist.ReduceOp.MAX)
wandb_log['mem_allocated'] = mem_allocated.item()
mem_cache = torch.tensor(torch.cuda.max_memory_reserved() / 1024**2, device=device)
dist.all_reduce(mem_cache, op=dist.ReduceOp.MAX)
wandb_log['mem_cache'] = mem_cache.item()
if dist.get_rank() == 0:
wandb.log(wandb_log, step=curr_step)
start_time = time()
if data_status is None:
data_status = {}
for item in data_indexes:
if item['dataset_name'] not in data_status.keys():
data_status[item['dataset_name']] = {}
data_status[item['dataset_name']][item['worker_id']] = item['data_indexes']
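# data_status tracks the last consumed sample indexes per dataset and
# dataloader worker; it is gathered to rank 0 at checkpoint time so a
# resumed run can skip already-seen data.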
if curr_step > 0 and curr_step % training_args.save_every == 0:
if dist.get_rank() == 0:
gather_list = [None] * dist.get_world_size()
else:
gather_list = None
torch.cuda.empty_cache() # release cached blocks before gathering/saving to ease memory pressure
dist.gather_object(data_status, gather_list, dst=0)
FSDPCheckpoint.fsdp_save_ckpt(
ckpt_dir=training_args.checkpoint_dir,
train_steps=curr_step,
model=fsdp_model,
ema_model=ema_model,
optimizer=optimizer,
scheduler=scheduler,
logger=logger,
fsdp_config=fsdp_config,
data_status=gather_list
)
logger.info("Done!")
dist.barrier() # ensure all ranks have finished before teardown
if dist.get_rank() == 0:
wandb.finish()
dist.barrier()
dist.destroy_process_group()
if __name__ == "__main__":
main()