Commit
UPD: change default of batch size and viz
vqdang committed Jan 6, 2021
1 parent b527c00 commit e2b7c52
Showing 4 changed files with 24 additions and 16 deletions.
2 changes: 1 addition & 1 deletion infer/tile.py
@@ -341,7 +341,7 @@ def detach_items_of_uid(items_list, uid, nr_expected_items):
         overlay_kwargs = {
             "draw_dot": self.draw_dot,
             "type_colour": self.type_info_dict,
-            "line_thickness": 1,
+            "line_thickness": 2,
         }
         func_args = (
             self.post_proc_func,
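The only change here is the overlay boundary width: instance contours drawn on tile overlays go from 1 px to 2 px. As a rough sketch of how such a line_thickness kwarg typically reaches OpenCV (the helper below is hypothetical, not the repository's actual overlay function):

    # Hypothetical sketch: how a line_thickness kwarg usually drives contour
    # drawing with OpenCV. Not the repository's actual overlay code.
    import cv2
    import numpy as np

    def draw_overlay(canvas, inst_contours, type_colour, line_thickness=2, draw_dot=False):
        """Draw instance contours (and optional centroid dots) on an RGB canvas."""
        for contour, inst_type in inst_contours:
            colour = type_colour.get(inst_type, (255, 255, 0))
            # thickness=2 (the new default) makes boundaries easier to see than 1
            cv2.drawContours(canvas, [contour], -1, colour, line_thickness)
            if draw_dot:
                cx, cy = contour.reshape(-1, 2).mean(axis=0).astype(int)
                cv2.circle(canvas, (int(cx), int(cy)), 3, colour, -1)
        return canvas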
2 changes: 1 addition & 1 deletion models/hovernet/opt.py
@@ -90,7 +90,7 @@ def get_config(nr_type, mode):
                 },
             },
             "target_info": {"gen": (gen_targets, {}), "viz": (prep_sample, {})},
-            "batch_size": {"train": 8, "valid": 16,},
+            "batch_size": {"train": 4, "valid": 8,},  # batch size per gpu
             "nr_epochs": 50,
         },
     ],
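The config values halve, but their meaning also changes: batch_size is now interpreted per GPU, and run_train.py (below) multiplies it by the detected device count. A minimal sketch of that convention, with illustrative names:

    # Minimal sketch of the per-GPU batch-size convention; names are
    # illustrative, not the repository's exact config plumbing.
    import torch

    per_gpu_batch = {"train": 4, "valid": 8}     # values from opt.py above
    nr_gpus = max(torch.cuda.device_count(), 1)  # fall back to 1 on CPU-only machines

    # torch.nn.DataParallel splits each loader batch across devices, so the
    # loader must be fed the effective (total) batch size:
    effective_batch = {k: v * nr_gpus for k, v in per_gpu_batch.items()}
    print(effective_batch)  # e.g. {'train': 16, 'valid': 32} with 4 GPUs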
31 changes: 18 additions & 13 deletions run_infer.py
@@ -19,7 +19,7 @@
                                 'original' or 'fast'. [default: fast]
     --nr_inference_workers=<n>  Number of workers during inference. [default: 8]
     --nr_post_proc_workers=<n>  Number of workers during post-processing. [default: 16]
-    --batch_size=<n>            Batch size. [default: 128]
+    --batch_size=<n>            Batch size per 1 GPU. [default: 32]
 
 Two command mode are `tile` and `wsi` to enter corresponding inference mode
     tile   run the inference on tile
@@ -68,9 +68,11 @@
     --save_mask    To save mask. [default: False]
 """
 
+import torch
 import logging
 import os
 import copy
+from misc.utils import log_info
 from docopt import docopt
 
 #-------------------------------------------------------------------------------------------------------
@@ -82,6 +84,16 @@
     sub_cmd = args.pop('<command>')
     sub_cmd_args = args.pop('<args>')
 
+    # ! TODO: where to save logging
+    logging.basicConfig(
+        level=logging.INFO,
+        format='|%(asctime)s.%(msecs)03d| [%(levelname)s] %(message)s',datefmt='%Y-%m-%d|%H:%M:%S',
+        handlers=[
+            logging.FileHandler("debug.log"),
+            logging.StreamHandler()
+        ]
+    )
+
     if args['--help'] and sub_cmd is not None:
         if sub_cmd in sub_cli_dict:
             print(sub_cli_dict[sub_cmd])
@@ -98,6 +110,9 @@
     gpu_list = args.pop('--gpu')
     os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
 
+    nr_gpus = torch.cuda.device_count()
+    log_info('Detect #GPUS: %d' % nr_gpus)
+
     args = {k.replace('--', '') : v for k, v in args.items()}
     sub_args = {k.replace('--', '') : v for k, v in sub_args.items()}
     if args['model_path'] == None:
@@ -118,7 +133,7 @@
     # ***
     run_args = {
-        'batch_size' : int(args['batch_size']),
+        'batch_size' : int(args['batch_size']) * nr_gpus,
 
         'nr_inference_workers' : int(args['nr_inference_workers']),
         'nr_post_proc_workers' : int(args['nr_post_proc_workers']),
@@ -156,17 +171,7 @@
             'save_mask' : sub_args['save_mask'],
         })
     # ***
-
-    # ! TODO: where to save logging
-    logging.basicConfig(
-        level=logging.INFO,
-        format='|%(asctime)s.%(msecs)03d| [%(levelname)s] %(message)s',datefmt='%Y-%m-%d|%H:%M:%S',
-        handlers=[
-            logging.FileHandler("debug.log"),
-            logging.StreamHandler()
-        ]
-    )
-
+    exit()
     if sub_cmd == 'tile':
         from infer.tile import InferManager
         infer = InferManager(**method_args)
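Three related edits here: logging is configured before anything logs (a log call made before handlers exist is not emitted at INFO level, and may even lock in a default handler setup that the later basicConfig cannot override, so the GPU-count message would otherwise never reach debug.log); the visible GPUs are counted after CUDA_VISIBLE_DEVICES is set; and the CLI's per-GPU --batch_size is scaled into the effective batch. A condensed, illustrative sketch, assuming log_info wraps logging.info:

    # Condensed sketch of the new run_infer.py flow; assumes log_info wraps
    # logging.info. Values are illustrative.
    import logging
    import os
    import torch

    # 1. Install handlers first, or the INFO-level message below is not emitted.
    logging.basicConfig(
        level=logging.INFO,
        format='|%(asctime)s.%(msecs)03d| [%(levelname)s] %(message)s',
        datefmt='%Y-%m-%d|%H:%M:%S',
        handlers=[logging.FileHandler("debug.log"), logging.StreamHandler()],
    )

    # 2. Restrict visible devices *before* the first CUDA call so that
    #    torch.cuda.device_count() reports only the requested GPUs.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'  # from the --gpu argument
    nr_gpus = torch.cuda.device_count()
    logging.info('Detect #GPUS: %d' % nr_gpus)

    # 3. Scale the per-GPU CLI value (new default: 32) to the effective batch.
    batch_size = 32 * nr_gpus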
5 changes: 4 additions & 1 deletion run_train.py
@@ -115,7 +115,7 @@ def _get_datagen(self, batch_size, run_mode, target_gen, nr_procs=0, fold_idx=0)
         dataloader = DataLoader(
             input_dataset,
             num_workers=nr_procs,
-            batch_size=batch_size,
+            batch_size=batch_size * self.nr_gpus,
             shuffle=run_mode == "train",
             drop_last=run_mode == "train",
             worker_init_fn=worker_init_fn,
@@ -264,6 +264,9 @@ def get_last_chkpt_path(prev_phase_dir, net_name):
     ####
     def run(self):
         """Define multi-stage run or cross-validation or whatever in here."""
+        self.nr_gpus = torch.cuda.device_count()
+        print('Detect #GPUS: %d' % self.nr_gpus)
+
         phase_list = self.model_config["phase_list"]
         engine_opt = self.model_config["run_engine"]
 
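Training applies the same convention: nr_gpus is detected once in run(), and each DataLoader is built with the effective batch batch_size * self.nr_gpus. A self-contained sketch of that pattern (the dataset here is a placeholder, not the repository's training data):

    # Self-contained sketch of the per-GPU loader pattern under DataParallel;
    # placeholder dataset, not the repository's classes.
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    nr_gpus = max(torch.cuda.device_count(), 1)
    per_gpu_batch = 4  # the new "train" value from models/hovernet/opt.py

    dataset = TensorDataset(torch.randn(64, 3, 270, 270))
    loader = DataLoader(
        dataset,
        batch_size=per_gpu_batch * nr_gpus,  # DataParallel splits this across devices
        shuffle=True,
        drop_last=True,  # avoid a last batch too small to split over all GPUs
    )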
