add CLI test for examples (#2285)

* cli examples

* ddp

* CI

* CI

* req

* tests

* skip DDP

Co-authored-by: William Falcon <[email protected]>
Borda and williamFalcon authored Jun 27, 2020
1 parent 6673fc9 commit 4e13e41
Showing 6 changed files with 108 additions and 80 deletions.
4 changes: 3 additions & 1 deletion .drone.yml
@@ -36,9 +36,11 @@ steps:
 - pip install -r ./requirements/base.txt --user -q
 - pip install -r ./requirements/devel.txt --user -q
 #- pip install -r ./requirements/docs.txt --user -q
+- pip install -r ./requirements/examples.txt --user -q
 - pip list
 - python -c "import torch ; print(' & '.join([torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]) if torch.cuda.is_available() else 'only CPU')"
-- coverage run --source pytorch_lightning -m py.test pytorch_lightning tests benchmarks -v --durations=25 # --flake8
+- coverage run --source pytorch_lightning -m py.test pytorch_lightning tests -v --durations=25 # --flake8
+- python -m py.test benchmarks pl_examples -v --maxfail=2 --durations=0 # --flake8
 #- cd docs; make doctest; make coverage
 - coverage report
 - codecov --token $CODECOV_TOKEN # --pr $DRONE_PULL_REQUEST --build $DRONE_BUILD_NUMBER --branch $DRONE_BRANCH --commit $DRONE_COMMIT --tag $DRONE_TAG
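The CI now installs the example requirements and runs benchmarks plus pl_examples as a separate fail-fast step, outside coverage. To reproduce that step locally, a minimal sketch (assumes the examples requirements are installed; not part of the commit):

import sys

import pytest

# Mirrors the new Drone step:
#   python -m py.test benchmarks pl_examples -v --maxfail=2 --durations=0
sys.exit(pytest.main(["benchmarks", "pl_examples", "-v", "--maxfail=2", "--durations=0"]))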
17 changes: 9 additions & 8 deletions pl_examples/basic_examples/cpu_template.py
@@ -4,13 +4,10 @@
 import os
 from argparse import ArgumentParser

-import numpy as np
-import torch
-
-import pytorch_lightning as pl
+from pytorch_lightning import Trainer, seed_everything
 from pl_examples.models.lightning_template import LightningTemplateModel

-pl.seed_everything(234)
+seed_everything(234)


 def main(args):
@@ -26,15 +23,15 @@ def main(args):
     # ------------------------
     # 2 INIT TRAINER
     # ------------------------
-    trainer = pl.Trainer.from_argparse_args(args)
+    trainer = Trainer.from_argparse_args(args)

     # ------------------------
     # 3 START TRAINING
     # ------------------------
     trainer.fit(model)


-if __name__ == '__main__':
+def run_cli():
     # ------------------------
     # TRAINING ARGUMENTS
     # ------------------------
@@ -44,10 +41,14 @@ def main(args):

     # each LightningModule defines arguments relevant to it
     parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
-    parser = pl.Trainer.add_argparse_args(parser)
+    parser = Trainer.add_argparse_args(parser)
     args = parser.parse_args()

     # ---------------------
     # RUN TRAINING
     # ---------------------
     main(args)
+
+
+if __name__ == '__main__':
+    run_cli()
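The template now routes its whole CLI through Trainer's argparse helpers, which is what lets one generic test pass --max_epochs/--max_steps to any example. A minimal sketch of the pattern (real pytorch_lightning API; the flag values are illustrative):

from argparse import ArgumentParser

from pytorch_lightning import Trainer

# add_argparse_args registers every Trainer constructor argument as a CLI flag,
# e.g. --max_epochs, --max_steps, --gpus.
parser = Trainer.add_argparse_args(ArgumentParser())
args = parser.parse_args(['--max_epochs', '1', '--max_steps', '3'])

# from_argparse_args then builds the Trainer straight from the parsed namespace.
trainer = Trainer.from_argparse_args(args)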
52 changes: 13 additions & 39 deletions pl_examples/basic_examples/gpu_template.py
@@ -4,44 +4,34 @@
 import os
 from argparse import ArgumentParser

-import numpy as np
-import torch
-
-import pytorch_lightning as pl
+from pytorch_lightning import Trainer, seed_everything
 from pl_examples.models.lightning_template import LightningTemplateModel

-SEED = 2334
-torch.manual_seed(SEED)
-np.random.seed(SEED)
+seed_everything(234)


-def main(hparams):
+def main(args):
     """
     Main training routine specific for this project
     :param hparams:
     """
     # ------------------------
     # 1 INIT LIGHTNING MODEL
     # ------------------------
-    model = LightningTemplateModel(**vars(hparams))
+    model = LightningTemplateModel(**vars(args))

     # ------------------------
     # 2 INIT TRAINER
     # ------------------------
-    trainer = pl.Trainer(
-        max_epochs=hparams.epochs,
-        gpus=hparams.gpus,
-        distributed_backend=hparams.distributed_backend,
-        precision=16 if hparams.use_16bit else 32,
-    )
+    trainer = Trainer.from_argparse_args(args)

     # ------------------------
     # 3 START TRAINING
     # ------------------------
     trainer.fit(model)


-if __name__ == '__main__':
+def run_cli():
     # ------------------------
     # TRAINING ARGUMENTS
     # ------------------------
@@ -50,32 +40,16 @@ def main(hparams):
     root_dir = os.path.dirname(os.path.realpath(__file__))
     parent_parser = ArgumentParser(add_help=False)

-    # gpu args
-    parent_parser.add_argument(
-        '--gpus',
-        type=int,
-        default=2,
-        help='how many gpus'
-    )
-    parent_parser.add_argument(
-        '--distributed_backend',
-        type=str,
-        default='dp',
-        help='supports four options dp, ddp, ddp2, ddp_spawn, ...',
-        choices=['dp', 'ddp', 'ddp2', 'ddp_spawn', 'ddp_cpu'],
-    )
-    parent_parser.add_argument(
-        '--use_16bit',
-        dest='use_16bit',
-        action='store_true',
-        help='if true uses 16 bit precision'
-    )
-
     # each LightningModule defines arguments relevant to it
     parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
-    hyperparams = parser.parse_args()
+    parser = Trainer.add_argparse_args(parser)
+    args = parser.parse_args()

     # ---------------------
     # RUN TRAINING
     # ---------------------
-    main(hyperparams)
+    main(args)
+
+
+if __name__ == '__main__':
+    run_cli()
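The hand-rolled --gpus, --distributed_backend and --use_16bit flags can go because Trainer.add_argparse_args already exposes --gpus and --distributed_backend, and 16-bit training maps onto Trainer's precision argument. An assumed equivalent of the old defaults under the new CLI (illustrative, not from the commit):

python gpu_template.py --gpus 2 --distributed_backend dp --precision 16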
34 changes: 18 additions & 16 deletions pl_examples/basic_examples/multi_node_ddp2_demo.py
@@ -4,31 +4,28 @@
 import os
 from argparse import ArgumentParser

-import numpy as np
-import torch
-
-import pytorch_lightning as pl
+from pytorch_lightning import Trainer, seed_everything
 from pl_examples.models.lightning_template import LightningTemplateModel

-SEED = 2334
-torch.manual_seed(SEED)
-np.random.seed(SEED)
+seed_everything(234)


-def main(hparams):
+def main(args):
     """Main training routine specific for this project."""
     # ------------------------
     # 1 INIT LIGHTNING MODEL
     # ------------------------
-    model = LightningTemplateModel(hparams)
+    model = LightningTemplateModel(args)

     # ------------------------
     # 2 INIT TRAINER
     # ------------------------
-    trainer = pl.Trainer(
-        gpus=2,
-        num_nodes=2,
-        distributed_backend='ddp2'
+    trainer = Trainer(
+        gpus=args.gpus,
+        num_nodes=args.num_nodes,
+        distributed_backend='ddp2',
+        max_epochs=args.max_epochs,
+        max_steps=args.max_steps,
     )

     # ------------------------
@@ -37,15 +34,20 @@ def main(hparams):
     trainer.fit(model)


-if __name__ == '__main__':
+def run_cli():
     root_dir = os.path.dirname(os.path.realpath(__file__))
     parent_parser = ArgumentParser(add_help=False)

     # each LightningModule defines arguments relevant to it
     parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
-    hyperparams = parser.parse_args()
+    parser = Trainer.add_argparse_args(parser)
+    args = parser.parse_args()

     # ---------------------
     # RUN TRAINING
     # ---------------------
-    main(hyperparams)
+    main(args)
+
+
+if __name__ == '__main__':
+    run_cli()
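The ddp2 demo's hard-coded gpus=2, num_nodes=2 give way to CLI-driven values, plus max_epochs/max_steps caps so a run can be bounded. The ddp demo below receives the identical treatment; a usage sketch follows it.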
34 changes: 18 additions & 16 deletions pl_examples/basic_examples/multi_node_ddp_demo.py
@@ -4,31 +4,28 @@
 import os
 from argparse import ArgumentParser

-import numpy as np
-import torch
-
-import pytorch_lightning as pl
 from pl_examples.models.lightning_template import LightningTemplateModel
+from pytorch_lightning import Trainer, seed_everything

-SEED = 2334
-torch.manual_seed(SEED)
-np.random.seed(SEED)
+seed_everything(234)


-def main(hparams):
+def main(args):
     """Main training routine specific for this project."""
     # ------------------------
     # 1 INIT LIGHTNING MODEL
     # ------------------------
-    model = LightningTemplateModel(hparams)
+    model = LightningTemplateModel(args)

     # ------------------------
     # 2 INIT TRAINER
     # ------------------------
-    trainer = pl.Trainer(
-        gpus=2,
-        num_nodes=2,
-        distributed_backend='ddp'
+    trainer = Trainer(
+        gpus=args.gpus,
+        num_nodes=args.num_nodes,
+        distributed_backend='ddp',
+        max_epochs=args.max_epochs,
+        max_steps=args.max_steps,
     )

     # ------------------------
@@ -37,15 +34,20 @@ def main(hparams):
     trainer.fit(model)


-if __name__ == '__main__':
+def run_cli():
     root_dir = os.path.dirname(os.path.realpath(__file__))
     parent_parser = ArgumentParser(add_help=False)

     # each LightningModule defines arguments relevant to it
     parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
-    hyperparams = parser.parse_args()
+    parser = Trainer.add_argparse_args(parser)
+    args = parser.parse_args()

     # ---------------------
     # RUN TRAINING
     # ---------------------
-    main(hyperparams)
+    main(args)
+
+
+if __name__ == '__main__':
+    run_cli()
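With both multi-node demos parameterized, a bounded run matching the flags in the commented-out tests below would look roughly like (assumed invocation):

python multi_node_ddp_demo.py --gpus 2 --num_nodes 1 --max_epochs 1 --max_steps 3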
47 changes: 47 additions & 0 deletions pl_examples/test_examples.py
@@ -0,0 +1,47 @@
+from unittest import mock
+
+import pytest
+import torch
+
+
+@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3'])
+def test_cpu_template(cli_args):
+    """Test running CLI for an example with default params."""
+    from pl_examples.basic_examples.cpu_template import run_cli
+
+    cli_args = cli_args.split(' ') if cli_args else []
+    with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
+        run_cli()
+
+
+@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3'])
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
+def test_gpu_template(cli_args):
+    """Test running CLI for an example with default params."""
+    from pl_examples.basic_examples.gpu_template import run_cli
+
+    cli_args = cli_args.split(' ') if cli_args else []
+    with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
+        run_cli()
+
+
+# @pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3 --num_nodes 1 --gpus 2'])
+# @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
+# def test_multi_node_ddp(cli_args):
+#     """Test running CLI for an example with default params."""
+#     from pl_examples.basic_examples.multi_node_ddp_demo import run_cli
+#
+#     cli_args = cli_args.split(' ') if cli_args else []
+#     with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
+#         run_cli()
+
+
+# @pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3 --num_nodes 1 --gpus 2'])
+# @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
+# def test_multi_node_ddp2(cli_args):
+#     """Test running CLI for an example with default params."""
+#     from pl_examples.basic_examples.multi_node_ddp2_demo import run_cli
+#
+#     cli_args = cli_args.split(' ') if cli_args else []
+#     with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
+#         run_cli()
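These tests drive each example's real command line without spawning a subprocess: argparse does import sys as _sys internally and reads _sys.argv[1:] when parse_args() is called with no arguments, so patching argparse._sys.argv injects the flags in-process. A self-contained sketch of the mechanism (not part of the commit):

from argparse import ArgumentParser
from unittest import mock

parser = ArgumentParser()
parser.add_argument('--max_steps', type=int)

# parse_args() with no explicit argv falls back to _sys.argv[1:],
# so patching argparse._sys.argv simulates a command line in-process.
with mock.patch("argparse._sys.argv", ["any.py", "--max_steps", "3"]):
    args = parser.parse_args()

assert args.max_steps == 3

The multi-node DDP variants ship commented out (the commit message notes "skip DDP"), and their skipif guards would in any case require a machine with at least two GPUs.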
