-
Notifications
You must be signed in to change notification settings - Fork 25
Expand file tree
/
Copy pathfinetune_bliva_flant5.yaml
More file actions
147 lines (131 loc) · 3.04 KB
/
finetune_bliva_flant5.yaml
File metadata and controls
147 lines (131 loc) · 3.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
---
# BLIVA (flan-t5-xxl) finetuning configuration.
model:
  arch: bliva_flant5
  model_type: flant5xxl

  # Checkpoint loading: start from the pretrained BLIVA weights rather than
  # a previously finetuned checkpoint.
  load_finetuned: false
  load_pretrained: true
  pretrained: "please specify the path to the pretrained model"  # TODO: set a real checkpoint path before training
  finetuned: ""

  # ViT visual encoder
  image_size: 224
  drop_path_rate: 0
  use_grad_checkpoint: false
  vit_precision: "fp16"
  freeze_vit: true

  # Q-Former
  num_query_token: 32

  # T5 language-model backbone
  t5_model: "google/flan-t5-xxl"

  # generation configs
  prompt: ""
# Training datasets. Each entry declares the train-split image and text
# processors consumed by the dataset builder.
# NOTE(review): ocrvqa uses "blip2_image_train" while every other dataset
# uses "blip_image_train" — confirm this asymmetry is intentional.
datasets:
  ocrvqa:
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  coco_vqa:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  ok_vqa:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  aok_vqa:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  coco_caption:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  llavavqa:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  textcaps:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  vqg_coco_vqa:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  vqg_ok_vqa:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
  vqg_aok_vqa:
    vis_processor:
      train:
        name: "blip_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
# Runner / optimization settings.
run:
  runner: runner_iter
  task: image_text_pretrain

  # optimizer & LR schedule
  # NOTE(review): YAML 1.1 loaders (e.g. PyYAML) read "1e-5"/"1e-8" as
  # strings, not floats — the runner is presumed to cast; confirm, or write
  # them as 1.0e-5 / 1.0e-8 if a genuine float is required.
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 1e-5
  min_lr: 0
  warmup_lr: 1e-8
  accum_grad_iters: 1
  weight_decay: 0.05

  # schedule length (iteration-based runner)
  max_epoch: 2
  max_iters: 200000
  iters_per_inner_epoch: 100000
  batch_size_train: 3
  batch_size_eval: 1
  num_workers: 8
  warmup_steps: 1000
  seed: 42
  output_dir: "output/finetuned_bliva_flant5/"

  amp: true
  resume_ckpt_path: null
  evaluate: false
  train_splits: ["train"]

  # distributed setup
  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: true

  # Per-dataset sampling ratios for the mixed training stream
  # (values sum to ~1.0; keys match the `datasets` section).
  train_dataset_ratios:
    ocrvqa: 0.20482112476204395
    coco_vqa: 0.15271327995696837
    ok_vqa: 0.02175930084085304
    aok_vqa: 0.02993954782443368
    coco_caption: 0.17258428920102808
    llavavqa: 0.1378184390146113
    textcaps: 0.07595188977780638
    vqg_coco_vqa: 0.15271327995696837
    vqg_ok_vqa: 0.02175930084085304
    vqg_aok_vqa: 0.02993954782443368