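"""train.py — PyTorch Lightning entry point for 2D slice segmentation on the
KiTS/LiTS datasets (tumor or organ task).

Example invocations (flags and defaults come from the argparse parser below;
the data paths are the script's defaults and will likely need adjusting for
your setup):

    python train.py --mode train --dataset kits --task tumor --model raunet
    python train.py --mode test --checkpoint_path /data/checkpoints
"""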
import os
import torch
import argparse
from networks import get_model
from utils.base_pl_model import BasePLModel
from datasets.midataset import SliceDataset
from utils.loss_functions import calc_loss
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import seed
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint

seed.seed_everything(123)  # fix RNG seeds for reproducible runs

parser = argparse.ArgumentParser('train')
parser.add_argument('--train_data_path', type=str, default='/data/kits/train')
parser.add_argument('--test_data_path', type=str, default='/data/kits/test')
parser.add_argument('--checkpoint_path', type=str, default='/data/checkpoints')
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
parser.add_argument('--model', type=str, default='raunet')
parser.add_argument('--dataset', type=str, default='kits', choices=['kits', 'lits'])
parser.add_argument('--task', type=str, default='tumor', choices=['tumor', 'organ'])
parser.add_argument('--epochs', type=int, default=30)
parser.add_argument('--lr', type=float, default=1e-2)


class SegPL(BasePLModel):
    """Lightning module wrapping the segmentation network."""

    def __init__(self, params):
        super(SegPL, self).__init__()
        self.save_hyperparameters(params)
        # Two output channels: binary segmentation (background vs. target).
        self.net = get_model(self.hparams.model, channels=2)

    def forward(self, x):
        # The network returns auxiliary outputs; only the main map is kept.
        output, _, _ = self.net(x)
        return output

    def training_step(self, batch, batch_idx):
        ct, mask, name = batch
        output = self.forward(ct)
        loss = calc_loss(output, mask)  # Dice-based loss
        return {'loss': loss}
    def validation_step(self, batch, batch_idx):
        # Validation reuses the test logic and metrics.
        return self.test_step(batch, batch_idx)

    def test_step(self, batch, batch_idx):
        ct, mask, name = batch
        output = self.forward(ct)
        # Accumulate segmentation metrics (implemented in BasePLModel).
        self.measure(batch, output)
    def train_dataloader(self):
        dataset = SliceDataset(
            data_path=self.hparams.train_data_path,
            dataset=self.hparams.dataset,
            task=self.hparams.task
        )
        return DataLoader(dataset, batch_size=self.hparams.batch_size,
                          num_workers=32, pin_memory=True, shuffle=True)

    def test_dataloader(self):
        dataset = SliceDataset(
            data_path=self.hparams.test_data_path,
            dataset=self.hparams.dataset,
            task=self.hparams.task,
            train=False
        )
        return DataLoader(dataset, batch_size=self.hparams.batch_size,
                          num_workers=16, pin_memory=True)

    def val_dataloader(self):
        return self.test_dataloader()
    def configure_optimizers(self):
        opt = torch.optim.Adam(self.parameters(), lr=self.hparams.lr, betas=(0.9, 0.999))
        # Cosine annealing from the initial LR down to 1e-6 over the full run.
        scheduler = {'scheduler': torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=self.hparams.epochs, eta_min=1e-6),
                     'interval': 'epoch',
                     'frequency': 1}
        return [opt], [scheduler]


def main():
    args = parser.parse_args()
    model = SegPL(args)
    # Save a checkpoint every epoch plus a rolling `last.ckpt`.
    checkpoint_callback = ModelCheckpoint(
        dirpath=args.checkpoint_path,
        filename='checkpoint_%s_%s_%s_{epoch}' % (args.dataset, args.task, args.model),
        save_last=True,
        save_top_k=-1,
    )
    logger = TensorBoardLogger('log', name='%s_%s_%s' % (args.dataset, args.task, args.model))
    # `gpus=[8]` pins training to GPU index 8; adjust for your hardware.
    trainer = Trainer.from_argparse_args(args, max_epochs=args.epochs, gpus=[8],
                                         callbacks=[checkpoint_callback], logger=logger)
    trainer.fit(model)


def test():
    args = parser.parse_args()
    # Evaluate the most recent checkpoint written by ModelCheckpoint.
    model = SegPL.load_from_checkpoint(checkpoint_path=os.path.join(args.checkpoint_path, 'last.ckpt'))
    trainer = Trainer(gpus=[8])
    trainer.test(model)


if __name__ == '__main__':
    args = parser.parse_args()
    if args.mode == 'train':
        main()
    elif args.mode == 'test':
        test()