From 890378e52c6c20d268fa0793fe6ae1ab1df6f6f9 Mon Sep 17 00:00:00 2001
From: NanoCode012
Date: Fri, 15 Jan 2021 00:53:13 +0700
Subject: [PATCH] Fix batch-size on resume for multi-gpu (#1942)

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 459c81c6130d..9aff01a42c08 100644
--- a/train.py
+++ b/train.py
@@ -477,7 +477,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         apriori = opt.global_rank, opt.local_rank
         with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
             opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
-        opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori  # reinstate
+        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
         logger.info('Resuming training from %s' % ckpt)
     else:
         # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
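
For context, a minimal self-contained sketch of the failure this one-liner fixes. This is not the actual train.py code: the helper split_batch_for_ddp() and the standalone Namespace are hypothetical, assuming only that the surrounding setup records the global batch size in opt.total_batch_size and then divides opt.batch_size by the world size for DDP. On resume, opt is replaced wholesale from the saved opt.yaml, whose batch_size was already divided in the previous run, so without reinstating it from total_batch_size the per-GPU split is applied a second time.

import argparse

def split_batch_for_ddp(opt, world_size):
    # Hypothetical stand-in for the startup logic: remember the global
    # batch size, then give each GPU an equal share.
    opt.total_batch_size = opt.batch_size
    if world_size > 1:
        opt.batch_size = opt.total_batch_size // world_size

# First run: --batch-size 64 on 4 GPUs -> 16 images per GPU.
opt = argparse.Namespace(batch_size=64)
split_batch_for_ddp(opt, world_size=4)
assert (opt.batch_size, opt.total_batch_size) == (16, 64)
# The opt.yaml written now stores batch_size=16, total_batch_size=64.

# Resume without the fix: opt.yaml is loaded verbatim, then the split runs
# again on the already-split value: 16 // 4 = 4 images per GPU (wrong).
# With the fix, batch_size is reinstated from total_batch_size first:
opt.batch_size = opt.total_batch_size  # the patched 'reinstate' assignment
split_batch_for_ddp(opt, world_size=4)
assert opt.batch_size == 16  # correct per-GPU batch size again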