
Commit 607f01a

Fix bug: optimize bn when distributed is True
Sundrops authored Apr 1, 2021
1 parent 0384583 commit 607f01a
Showing 1 changed file with 2 additions and 2 deletions.
det3d/torchie/apis/train.py (4 changes: 2 additions & 2 deletions)
```diff
@@ -265,7 +265,8 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, logger=None):
 
     total_steps = cfg.total_epochs * len(data_loaders[0])
     # print(f"total_steps: {total_steps}")
-
+    if distributed:
+        model = apex.parallel.convert_syncbn_model(model)
     if cfg.lr_config.type == "one_cycle":
         # build trainer
         optimizer = build_one_cycle_optimizer(model, cfg.optimizer)
@@ -281,7 +282,6 @@ def train_detector(model, dataset, cfg, distributed=False, validate=False, logger=None):
 
     # put model on gpus
     if distributed:
-        model = apex.parallel.convert_syncbn_model(model)
     model = DistributedDataParallel(
         model.cuda(cfg.local_rank),
         device_ids=[cfg.local_rank],
```
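Why the order matters: apex's convert_syncbn_model rebuilds every BatchNorm module as a new SyncBatchNorm whose affine weight and bias are freshly created parameter tensors. An optimizer built before the conversion therefore keeps references to the discarded BatchNorm parameters, and the BN parameters of the model actually being trained are never updated. Moving the conversion above the optimizer build, as this commit does, lets the optimizer pick up the SyncBatchNorm parameters. Below is a minimal sketch of the failure mode, assuming apex is installed; the toy Sequential model, the untracked_params helper, and plain SGD (standing in for det3d's build_one_cycle_optimizer) are illustrative, not taken from det3d.

```python
import torch
import apex


def untracked_params(optimizer, model):
    # Collect the identities of every parameter the optimizer will update.
    tracked = {id(p) for group in optimizer.param_groups for p in group["params"]}
    # Report model parameters the optimizer does not know about.
    return [name for name, p in model.named_parameters() if id(p) not in tracked]


def make_model():
    # Hypothetical toy model: one conv layer followed by a BatchNorm layer.
    return torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))


# Buggy order (before this commit): build the optimizer, then convert BN.
# convert_syncbn_model replaces each BatchNorm with a new SyncBatchNorm that
# has fresh weight/bias tensors, so the optimizer keeps pointing at the old,
# orphaned BatchNorm parameters.
model = make_model()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
model = apex.parallel.convert_syncbn_model(model)
print(untracked_params(optimizer, model))  # expected: ['1.weight', '1.bias']

# Fixed order (this commit): convert BN first, then build the optimizer.
model = apex.parallel.convert_syncbn_model(make_model())
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
print(untracked_params(optimizer, model))  # expected: []
```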
