# train_pseudo.py (forked from yz93/LAVT-RIS)

import datetime
from torch.cuda.amp import autocast, GradScaler
from utils import NativeScalerWithGradNormCount
import os
import time
import torch
import torch.utils.data
from functools import reduce
import operator
from bert.modeling_bert import BertModel
from lib import segmentation
import transforms as T
import utils
import numpy as np
import json
from augmentation.mask_aug import patchify, unpatchify, random_masking
from misc.common import make_object_from_config
from misc.workspace import create_workspace, save_configs_and_args
from torch.utils.tensorboard import SummaryWriter

# ----------------------- Important change: begin -----------------------
# We no longer take local_rank from the command line via argparse;
# instead, main() reads it from the LOCAL_RANK environment variable
# (see the __main__ block at the bottom of this file).
def get_args_parser():
    from args import get_parser  # get_parser() comes from args.py (also imported in __main__)
    parser = get_parser()
    # Note: we deliberately do not add a --local_rank argument here
    return parser
# ----------------------- Important change: end -----------------------


def get_dataset(image_set, transform, args):
    from data.dataset_refer_bert import ReferDataset
    ds = ReferDataset(args,
                      split=image_set,
                      image_transforms=transform,
                      target_transforms=None)
    num_classes = 2
    return ds, num_classes


# IoU calculation for validation
def IoU(pred, gt):
    pred = pred.argmax(1)
    intersection = torch.sum(torch.mul(pred, gt))
    union = torch.sum(torch.add(pred, gt)) - intersection
    if intersection == 0 or union == 0:
        iou = 0
    else:
        iou = float(intersection) / float(union)
    return iou, intersection, union
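

# Illustrative only: a quick sanity check of IoU on a toy prediction. Assumes,
# as evaluate() below does, that pred holds raw two-class logits (B, 2, H, W)
# and gt is a binary mask (B, H, W). Never called during training.
def _iou_sanity_check():
    logits = torch.zeros(1, 2, 4, 4)
    logits[:, 0] = 0.5          # background logit everywhere (avoids argmax ties)
    logits[:, 1, :2, :] = 1.0   # foreground wins on the top two rows (8 pixels)
    gt = torch.zeros(1, 4, 4)
    gt[:, :, :2] = 1            # ground truth covers the left two columns (8 pixels)
    iou, inter, union = IoU(logits, gt)
    # the two regions overlap on a 2x2 corner: IoU = 4 / (8 + 8 - 4) = 4 / 12
    assert abs(iou - 4.0 / 12.0) < 1e-6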


def get_transform(args):
    transforms = [T.Resize(args.img_size, args.img_size),
                  T.ToTensor(),
                  T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
    return T.Compose(transforms)


def evaluate(model, data_loader, bert_model, writer=None, epoch=None):
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    total_its = 0
    acc_ious = 0

    # evaluation variables
    cum_I, cum_U = 0, 0
    eval_seg_iou_list = [.5, .6, .7, .8, .9]
    seg_correct = np.zeros(len(eval_seg_iou_list), dtype=np.int32)
    seg_total = 0
    mean_IoU = []

    with torch.no_grad():
        for data in metric_logger.log_every(data_loader, 100, header):
            total_its += 1
            image, target, sentences, attentions = data
            image, target, sentences, attentions = image.cuda(non_blocking=True), \
                                                   target.cuda(non_blocking=True), \
                                                   sentences.cuda(non_blocking=True), \
                                                   attentions.cuda(non_blocking=True)
            sentences = sentences.squeeze(1)
            attentions = attentions.squeeze(1)

            if bert_model is not None:
                last_hidden_states = bert_model(sentences, attention_mask=attentions)[0]
                embedding = last_hidden_states.permute(0, 2, 1)  # (B, 768, N_l) to make Conv1d happy
                attentions = attentions.unsqueeze(dim=-1)  # (B, N_l, 1)
                output = model(image, embedding, l_mask=attentions)
            else:
                output = model(image, sentences, l_mask=attentions)

            iou, I, U = IoU(output, target)
            acc_ious += iou
            mean_IoU.append(iou)
            cum_I += I
            cum_U += U
            for n_eval_iou in range(len(eval_seg_iou_list)):
                eval_seg_iou = eval_seg_iou_list[n_eval_iou]
                seg_correct[n_eval_iou] += (iou >= eval_seg_iou)
            seg_total += 1
    iou = acc_ious / total_its

    mean_IoU = np.array(mean_IoU)
    mIoU = np.mean(mean_IoU)
    print('Final results:')
    print('Mean IoU is %.2f\n' % (mIoU * 100.))
    results_str = ''
    for n_eval_iou in range(len(eval_seg_iou_list)):
        precision = seg_correct[n_eval_iou] * 100. / seg_total
        results_str += '    precision@%s = %.2f\n' % (str(eval_seg_iou_list[n_eval_iou]), precision)
        if writer is not None and epoch is not None:
            writer.add_scalar(f"val/precision@{eval_seg_iou_list[n_eval_iou]}", precision, epoch)
    results_str += '    overall IoU = %.2f\n' % (cum_I * 100. / cum_U)
    print(results_str)

    mIoU, oIoU = 100 * mIoU, 100 * cum_I / cum_U
    if writer is not None and epoch is not None:
        writer.add_scalar("val/mean_IoU", mIoU, epoch)
        writer.add_scalar("val/overall_IoU", oIoU, epoch)
    return mIoU, oIoU
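

# Note on the metrics above: mean IoU averages the per-sample IoUs, while
# overall IoU = cum_I / cum_U pools intersections and unions across the whole
# split (so larger objects weigh more). precision@X is the fraction of samples
# whose IoU reaches threshold X: e.g. for per-sample IoUs [0.4, 0.6, 0.9],
# precision@0.5 = 2/3 and precision@0.8 = 1/3.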


def freeze_model(model):
    for param in model.parameters():
        param.requires_grad = False


def unfreeze_model(model):
    for param in model.parameters():
        param.requires_grad = True
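

# Illustrative only: a minimal sketch of the image-masking round trip used in
# train_one_epoch below, assuming (as the calls there suggest) that patchify
# takes (images, patch_size), random_masking takes (patches, mask_ratio), and
# unpatchify inverts patchify. Not called anywhere in this script.
def _masking_round_trip_demo(patch_size=16, mask_ratio=0.75):
    images = torch.randn(2, 3, 224, 224)           # dummy batch
    patches = patchify(images, patch_size)         # split images into patch tokens
    patches = random_masking(patches, mask_ratio)  # mask a random subset of patches
    masked = unpatchify(patches, patch_size)       # reassemble to (B, 3, H, W)
    return masked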


def train_one_epoch(model, label_criterion, consistent_criterion, alpha, optimizer, data_loader, lr_scheduler,
                    epoch, print_freq, iterations, bert_model, writer=None, stream_configs=None):
    enable_masking = stream_configs.get("enable_image_masking", False)
    if enable_masking:
        mask_ratio = stream_configs.get("mask_ratio", 0.75)
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('label_loss', utils.SmoothedValue(window_size=20, fmt='{value:.4f}'))
    metric_logger.add_meter('consistent_loss', utils.SmoothedValue(window_size=20, fmt='{value:.4f}'))
    header = 'Epoch: [{}]'.format(epoch)
    loss_scaler = NativeScalerWithGradNormCount()

    for i, data in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # image, target, sentences, attentions, aug_sentences, aug_attentions = data
        optimizer.zero_grad()
        image = data['img']
        target = data['target']
        sentences = data['txt']
        attentions = data['attention_mask']
        aug_sentences = data['aug_txt']
        aug_attentions = data['aug_attention_mask']
        aug_image = image.clone()

        image = image.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        sentences = sentences.cuda(non_blocking=True).squeeze(1)
        attentions = attentions.cuda(non_blocking=True).squeeze(1)
        aug_sentences = aug_sentences.cuda(non_blocking=True).squeeze(1)
        aug_attentions = aug_attentions.cuda(non_blocking=True).squeeze(1)
        aug_image = aug_image.cuda(non_blocking=True)

        if enable_masking:
            aug_image = patchify(aug_image, stream_configs["image_mask_patch_size"])
            aug_image = random_masking(aug_image, stream_configs["image_mask_ratio"])
            aug_image = unpatchify(aug_image, stream_configs["image_mask_patch_size"])

        # Primary branch
        with torch.cuda.amp.autocast():
            last_hidden_states = bert_model(sentences, attention_mask=attentions)[0]
            embedding = last_hidden_states.permute(0, 2, 1)
            l_mask = attentions.unsqueeze(-1)
            output_primary = model(image, embedding, l_mask=l_mask)
            label_loss = label_criterion(output_primary, target) * alpha
        # accumulate gradients only; the optimizer step happens after the second branch
        loss_scaler(loss=label_loss, optimizer=optimizer, clip_grad=None, parameters=model.parameters(),
                    create_graph=False, update_grad=False)

        # Distilled branch
        with torch.cuda.amp.autocast():
            aug_last_hidden_states = bert_model(aug_sentences, attention_mask=aug_attentions)[0]
            aug_embedding = aug_last_hidden_states.permute(0, 2, 1)
            aug_l_mask = aug_attentions.unsqueeze(-1)
            output_aug = model(aug_image, aug_embedding, l_mask=aug_l_mask)
            consistency_loss = consistent_criterion(output_primary.detach(), output_aug, scale_factor=1.0 - alpha)
        grad_norm = loss_scaler(loss=consistency_loss, optimizer=optimizer, clip_grad=None,
                                parameters=model.parameters(), create_graph=False, update_grad=True)

        total_loss = label_loss + consistency_loss
        lr_scheduler.step()
        metric_logger.update(loss=total_loss.item(),
                             label_loss=label_loss.item(),
                             consistent_loss=consistency_loss.item(),
                             lr=optimizer.param_groups[0]["lr"])
        if writer is not None:
            global_step = epoch * len(data_loader) + i
            writer.add_scalar("train/total_loss", total_loss.item(), global_step)
            writer.add_scalar("train/label_loss", label_loss.item(), global_step)
            writer.add_scalar("train/consistent_loss", consistency_loss.item(), global_step)
            writer.add_scalar("train/lr", optimizer.param_groups[0]["lr"], global_step)

    print(f"Epoch {epoch}: Avg Label Loss: {metric_logger.meters['label_loss'].global_avg:.4f}, "
          f"Avg Consistency Loss: {metric_logger.meters['consistent_loss'].global_avg:.4f}")
    if writer is not None:
        writer.add_scalar("train/epoch_avg_label_loss", metric_logger.meters['label_loss'].global_avg, epoch)
        writer.add_scalar("train/epoch_avg_consistent_loss", metric_logger.meters['consistent_loss'].global_avg, epoch)
        writer.add_scalar("train/epoch_avg_loss", metric_logger.meters['loss'].global_avg, epoch)
    return metric_logger.meters['loss'].global_avg, iterations
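

# Illustrative only: the two loss_scaler calls in train_one_epoch accumulate
# gradients from two backward passes before a single optimizer step
# (update_grad=False, then update_grad=True). Assuming the imported
# NativeScalerWithGradNormCount behaves like the common timm/MAE helper, the
# equivalent pattern with a plain torch.cuda.amp.GradScaler looks like this:
def _two_stage_step_demo(optimizer, loss_primary, loss_consistency):
    scaler = GradScaler()
    optimizer.zero_grad()
    scaler.scale(loss_primary).backward()      # first branch: accumulate grads only
    scaler.scale(loss_consistency).backward()  # second branch: accumulate on top
    scaler.step(optimizer)                     # one parameter update for both losses
    scaler.update()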


def main(args):
    workspace_dir, checkpoints_dir, logs_dir, configs_dir = create_workspace(args)
    print(f"Workspace created at: {workspace_dir}")
    save_configs_and_args(args, configs_dir, args.configs)
    writer = SummaryWriter(logs_dir)
    print(f"TensorBoard logs will be saved to: {logs_dir}")

    dataset_test, _ = get_dataset("val", get_transform(args=args), args=args)
    configs = json.load(open(args.configs, 'r'))
    dataset = make_object_from_config(configs["train"]["dataset"])
    label_criterion = make_object_from_config(configs["train"]["loss"]["label_loss"])
    consistent_criterion = make_object_from_config(configs["train"]["loss"]["consistent_loss"])
    alpha = configs["train"]["loss"]["alpha"]

    # batch sampler
    print(f"local rank {args.local_rank} / global rank {utils.get_rank()} successfully built train dataset.")
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    train_sampler = torch.utils.data.distributed.DistributedSampler(dataset, num_replicas=num_tasks,
                                                                    rank=global_rank, shuffle=True)
    test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    # data loader
    print("Building data loader...")
    print("batch size: {}".format(args.batch_size))
    print("number of workers: {}".format(args.workers))
    print("pin memory: {}".format(args.pin_mem))
    print("number of training samples: {}".format(len(dataset)))
    print("number of validation samples: {}".format(len(dataset_test)))
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.workers,
        pin_memory=args.pin_mem, drop_last=True, persistent_workers=True)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers)
    print("Data loader length: {}".format(len(data_loader)))

    # model initialization (original model setup)
    model = segmentation.__dict__[args.model](pretrained=args.pretrained_swin_weights, args=args)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                      find_unused_parameters=True)
    single_model = model.module

    if args.model != 'lavt_one':
        model_class = BertModel
        bert_model = model_class.from_pretrained(args.ck_bert)
        bert_model.pooler = None  # a work-around for a bug in Transformers == 3.0.2 that appears under DistributedDataParallel
        bert_model.cuda()
        bert_model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(bert_model)
        bert_model = torch.nn.parallel.DistributedDataParallel(bert_model, device_ids=[args.local_rank])
        single_bert_model = bert_model.module
    else:
        bert_model = None
        single_bert_model = None

    # resume training
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu', weights_only=False)
        single_model.load_state_dict(checkpoint['model'])
        if args.model != 'lavt_one':
            single_bert_model.load_state_dict(checkpoint['bert_model'])

    # parameters to optimize
    backbone_no_decay = list()
    backbone_decay = list()
    for name, m in single_model.backbone.named_parameters():
        if 'norm' in name or 'absolute_pos_embed' in name or 'relative_position_bias_table' in name:
            backbone_no_decay.append(m)
        else:
            backbone_decay.append(m)

    if args.model != 'lavt_one':
        params_to_optimize = [
            {'params': backbone_no_decay, 'weight_decay': 0.0},
            {'params': backbone_decay},
            {"params": [p for p in single_model.classifier.parameters() if p.requires_grad]},
            # the following are the parameters of BERT (only the first 10 encoder layers are optimized)
            {"params": reduce(operator.concat,
                              [[p for p in single_bert_model.encoder.layer[i].parameters()
                                if p.requires_grad] for i in range(10)])},
        ]
    else:
        params_to_optimize = [
            {'params': backbone_no_decay, 'weight_decay': 0.0},
            {'params': backbone_decay},
            {"params": [p for p in single_model.classifier.parameters() if p.requires_grad]},
            # the following are the parameters of BERT (only the first 10 encoder layers are optimized)
            {"params": reduce(operator.concat,
                              [[p for p in single_model.text_encoder.encoder.layer[i].parameters()
                                if p.requires_grad] for i in range(10)])},
        ]

    # optimizer
    optimizer = torch.optim.AdamW(params_to_optimize,
                                  lr=args.lr,
                                  weight_decay=args.weight_decay,
                                  amsgrad=args.amsgrad)

    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lambda x: (1 - x / (len(data_loader) * args.epochs)) ** 0.9)
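    # Illustrative: with T = len(data_loader) * args.epochs total steps, the
    # lambda above multiplies the base lr by (1 - t / T) ** 0.9 at step t, so
    # the learning rate decays polynomially from args.lr toward 0 (the
    # scheduler is stepped once per iteration in train_one_epoch).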

    # housekeeping
    start_time = time.time()
    iterations = 0
    best_mIoU = -0.1

    # resume training (optimizer, lr scheduler, and the epoch)
    if args.resume:
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        resume_epoch = checkpoint['epoch']
        # resume_epoch = -999
    else:
        resume_epoch = -999

    # training loops
    for epoch in range(max(0, resume_epoch + 1), args.epochs):
        data_loader.sampler.set_epoch(epoch)

        ## Train
        train_one_epoch(model, label_criterion, consistent_criterion, alpha, optimizer, data_loader,
                        lr_scheduler, epoch, args.print_freq, iterations, bert_model,
                        writer if utils.get_rank() == 0 else None, configs["train"]["stream_configs"])

        ## Evaluate
        iou, overallIoU = evaluate(model, data_loader_test, bert_model,
                                   writer if utils.get_rank() == 0 else None, epoch)
        print('Average object IoU {}'.format(iou))
        print('Overall IoU {}'.format(overallIoU))

        save_checkpoint = (best_mIoU < iou)
        if save_checkpoint:
            print('Better epoch: {}\n'.format(epoch))
            if single_bert_model is not None:
                dict_to_save = {'model': single_model.state_dict(), 'bert_model': single_bert_model.state_dict(),
                                'optimizer': optimizer.state_dict(), 'epoch': epoch, 'args': args,
                                'lr_scheduler': lr_scheduler.state_dict()}
            else:
                dict_to_save = {'model': single_model.state_dict(),
                                'optimizer': optimizer.state_dict(), 'epoch': epoch, 'args': args,
                                'lr_scheduler': lr_scheduler.state_dict()}
            best_model_path = os.path.join(checkpoints_dir, 'model_best_{}.pth'.format(args.model_id))
            utils.save_on_master(dict_to_save, best_model_path)
            best_mIoU = iou
        if utils.get_rank() == 0:
            writer.add_scalar("best/mIoU", best_mIoU, epoch)

        ## Save the last checkpoint
        if single_bert_model is not None:
            dict_to_save = {'model': single_model.state_dict(), 'bert_model': single_bert_model.state_dict(),
                            'optimizer': optimizer.state_dict(), 'epoch': epoch, 'args': args,
                            'lr_scheduler': lr_scheduler.state_dict()}
        else:
            dict_to_save = {'model': single_model.state_dict(),
                            'optimizer': optimizer.state_dict(), 'epoch': epoch, 'args': args,
                            'lr_scheduler': lr_scheduler.state_dict()}
        checkpoint_path = os.path.join(checkpoints_dir, 'model_last_{}.pth'.format(args.model_id))
        utils.save_on_master(dict_to_save, checkpoint_path)

    # summarize
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    if utils.get_rank() == 0:
        writer.add_text('summary/training_time', total_time_str)
        writer.add_scalar("summary/best_mIoU", best_mIoU, epoch)
        writer.close()
    print(f"Training completed. Best mIoU: {best_mIoU:.2f}")
    print(f"Best model saved at: {os.path.join(checkpoints_dir, 'model_best_{}.pth'.format(args.model_id))}")
    print(f"TensorBoard logs saved at: {logs_dir}")


if __name__ == "__main__":
    from args import get_parser
    parser = get_parser()
    args = parser.parse_args()

    # ------------- Key fix: this must happen before init_distributed_mode -------------
    # Read LOCAL_RANK from the environment and assign it to args.local_rank
    args.local_rank = int(os.environ.get('LOCAL_RANK', 0))
    # -----------------------------------------------------------------------------------

    # set up distributed learning
    # (this function internally calls torch.cuda.set_device(args.local_rank))
    utils.init_distributed_mode(args)
    print('Image size: {}'.format(str(args.img_size)))
    main(args)
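
# Example launch (illustrative; assumes the standard torchrun launcher, which
# sets the LOCAL_RANK environment variable read above; the exact flags depend
# on your args.py):
#
#   torchrun --nproc_per_node=4 train_pseudo.py --configs path/to/config.json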