-
Notifications
You must be signed in to change notification settings - Fork 19
Expand file tree
/
Copy pathrun.sh
More file actions
95 lines (93 loc) · 3.04 KB
/
run.sh
File metadata and controls
95 lines (93 loc) · 3.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# Run 1: PCM SD3 LoRA, stochastic adversarial variant (single-phase consistency).
PREFIX=fuyun_PCM_sd3_stochastic
MODEL_DIR="[PATH TO SD3]"                 # TODO: set to your local SD3 checkpoint dir
OUTPUT_DIR="outputs/lora_64_${PREFIX}"
PROJ_NAME="lora_64_formal_${PREFIX}"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
LOGFILE="train_log_${TIMESTAMP}.log"      # full stdout/stderr of this run is tee'd here

# NOTE: expansions are quoted — MODEL_DIR may contain spaces.
# Fixed typo: --tracker_project_nam -> --tracker_project_name.
accelerate launch --main_process_port 29500 train_pcm_lora_sd3_adv_stochastic.py \
  --pretrained_teacher_model="$MODEL_DIR" \
  --output_dir="$OUTPUT_DIR" \
  --tracker_project_name="$PROJ_NAME" \
  --mixed_precision=fp16 \
  --resolution=1024 \
  --lora_rank=32 \
  --learning_rate=5e-6 --loss_type="huber" --adam_weight_decay=1e-3 \
  --max_train_steps=20000 \
  --dataloader_num_workers=16 \
  --w_min=4 \
  --w_max=5 \
  --validation_steps=1000 \
  --checkpointing_steps=2000 --checkpoints_total_limit=10 \
  --train_batch_size=2 \
  --enable_xformers_memory_efficient_attention \
  --gradient_accumulation_steps=1 \
  --use_8bit_adam \
  --resume_from_checkpoint=latest \
  --seed=453645634 \
  --report_to=wandb \
  --num_euler_timesteps=100 \
  --multiphase=1 \
  --adv_weight=0.1 \
  --adv_lr=1e-5 \
  2>&1 | tee "$LOGFILE"
# Run 2: PCM SD3 LoRA, adversarial training with 2 consistency phases.
PREFIX=fuyun_PCM_sd3_2phases
MODEL_DIR="[PATH TO SD3]"                 # TODO: set to your local SD3 checkpoint dir
OUTPUT_DIR="outputs/lora_64_${PREFIX}"
PROJ_NAME="lora_64_formal_${PREFIX}"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
LOGFILE="train_log_${TIMESTAMP}.log"      # full stdout/stderr of this run is tee'd here

# NOTE: expansions are quoted — MODEL_DIR may contain spaces.
# Fixed typo: --tracker_project_nam -> --tracker_project_name.
accelerate launch --main_process_port 29500 train_pcm_lora_sd3_adv.py \
  --pretrained_teacher_model="$MODEL_DIR" \
  --output_dir="$OUTPUT_DIR" \
  --tracker_project_name="$PROJ_NAME" \
  --mixed_precision=fp16 \
  --resolution=1024 \
  --lora_rank=32 \
  --learning_rate=5e-6 --loss_type="huber" --adam_weight_decay=1e-3 \
  --max_train_steps=20000 \
  --dataloader_num_workers=16 \
  --w_min=4 \
  --w_max=5 \
  --validation_steps=1000 \
  --checkpointing_steps=2000 --checkpoints_total_limit=10 \
  --train_batch_size=2 \
  --enable_xformers_memory_efficient_attention \
  --gradient_accumulation_steps=1 \
  --use_8bit_adam \
  --resume_from_checkpoint=latest \
  --seed=453645634 \
  --report_to=wandb \
  --num_euler_timesteps=100 \
  --multiphase=2 \
  --adv_weight=0.1 \
  --adv_lr=1e-5 \
  2>&1 | tee "$LOGFILE"
# Run 3: PCM SD3 LoRA, adversarial training with 4 consistency phases.
PREFIX=fuyun_PCM_sd3_4phases
MODEL_DIR="[PATH TO SD3]"                 # TODO: set to your local SD3 checkpoint dir
OUTPUT_DIR="outputs/lora_64_${PREFIX}"
PROJ_NAME="lora_64_formal_${PREFIX}"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
LOGFILE="train_log_${TIMESTAMP}.log"      # full stdout/stderr of this run is tee'd here

# NOTE: expansions are quoted — MODEL_DIR may contain spaces.
# Fixed typo: --tracker_project_nam -> --tracker_project_name.
accelerate launch --main_process_port 29500 train_pcm_lora_sd3_adv.py \
  --pretrained_teacher_model="$MODEL_DIR" \
  --output_dir="$OUTPUT_DIR" \
  --tracker_project_name="$PROJ_NAME" \
  --mixed_precision=fp16 \
  --resolution=1024 \
  --lora_rank=32 \
  --learning_rate=5e-6 --loss_type="huber" --adam_weight_decay=1e-3 \
  --max_train_steps=20000 \
  --dataloader_num_workers=16 \
  --w_min=4 \
  --w_max=5 \
  --validation_steps=1000 \
  --checkpointing_steps=2000 --checkpoints_total_limit=10 \
  --train_batch_size=2 \
  --enable_xformers_memory_efficient_attention \
  --gradient_accumulation_steps=1 \
  --use_8bit_adam \
  --resume_from_checkpoint=latest \
  --seed=453645634 \
  --report_to=wandb \
  --num_euler_timesteps=100 \
  --multiphase=4 \
  --adv_weight=0.1 \
  --adv_lr=1e-5 \
  2>&1 | tee "$LOGFILE"