This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

Fix pipeline #3366

Merged · 7 commits · Feb 9, 2021
Changes from 5 commits
44 changes: 26 additions & 18 deletions examples/model_compress/pruning/basic_pruners_torch.py
@@ -13,7 +13,6 @@
 import argparse
 import os
 import time
-import argparse
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -35,6 +34,7 @@
     L1FilterPruner,
     L2FilterPruner,
     AGPPruner,
+    ActivationMeanRankFilterPruner,
     ActivationAPoZRankFilterPruner
 )

Expand All @@ -49,6 +49,7 @@
'slim': SlimPruner,
'agp': AGPPruner,
'fpgm': FPGMPruner,
'mean_activation': ActivationMeanRankFilterPruner,
'apoz': ActivationAPoZRankFilterPruner
}

@@ -68,7 +69,7 @@ def get_pruner(model, pruner_name, device, optimizer=None, dependency_aware=Fals
             'sparsity': args.sparsity,
             'op_types': ['default']
         }]
-    elif pruner_name == 'l1filter':
+    elif pruner_name in ['l1filter', 'mean_activation', 'apoz']:
         # Reproduced result in paper 'PRUNING FILTERS FOR EFFICIENT CONVNETS',
         # Conv_1, Conv_8, Conv_9, Conv_10, Conv_11, Conv_12 are pruned with 50% sparsity, as 'VGG-16-pruned-A'
         config_list = [{
@@ -81,6 +82,15 @@
             'sparsity': args.sparsity,
             'op_types': ['BatchNorm2d'],
         }]
+    elif pruner_name == 'agp':
+        config_list = [{
+            'initial_sparsity': 0.,
+            'final_sparsity': 0.8,
+            'start_epoch': 0,
+            'end_epoch': 10,
+            'frequency': 1,
+            'op_types': ['Conv2d']
+        }]
     else:
         config_list = [{
             'sparsity': args.sparsity,
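
The new 'agp' branch encodes a schedule rather than a single target: AGP (Zhu & Gupta, 2017) ramps sparsity from initial_sparsity to final_sparsity along a cubic curve between start_epoch and end_epoch, updating every `frequency` epochs. A standalone sketch of that schedule, paraphrasing the paper's formula rather than quoting this repo's implementation:

    def agp_sparsity(epoch, initial_sparsity=0.0, final_sparsity=0.8,
                     start_epoch=0, end_epoch=10, frequency=1):
        """Cubic AGP schedule: sparsity rises fast early, then levels off."""
        if epoch <= start_epoch:
            return initial_sparsity
        if epoch >= end_epoch:
            return final_sparsity
        # Updates happen every `frequency` epochs; snap to the latest update step.
        step = ((epoch - start_epoch) // frequency) * frequency
        progress = step / (end_epoch - start_epoch)
        return final_sparsity + (initial_sparsity - final_sparsity) * (1 - progress) ** 3

    # With the config above: agp_sparsity(5) == 0.8 * (1 - 0.5 ** 3) == 0.7
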
@@ -150,13 +160,13 @@ def get_model_optimizer_scheduler(args, device, train_loader, test_loader, crite
         if args.pretrained_model_dir is None:
             optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
             scheduler = MultiStepLR(
-                optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1)
+                optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1)
     elif args.model == 'vgg19':
         model = VGG(depth=19).to(device)
         if args.pretrained_model_dir is None:
             optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
             scheduler = MultiStepLR(
-                optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1)
+                optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1)
     else:
         raise ValueError("model not recognized")
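
The MultiStepLR edits above only normalize spacing around `*`; functionally, the scheduler multiplies the learning rate by gamma at 50% and 75% of pretraining. A small self-contained illustration (the Linear stand-in and the epoch count of 160 are illustrative):

    import torch
    from torch.optim.lr_scheduler import MultiStepLR

    model = torch.nn.Linear(4, 2)   # stand-in for the VGG models above
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[80, 120], gamma=0.1)  # pretrain_epochs=160

    for epoch in range(160):
        # ... train one epoch ...
        scheduler.step()
        # lr is 0.1 for epochs 0-79, 0.01 for 80-119, and 0.001 from epoch 120 on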

@@ -183,9 +193,8 @@ def get_model_optimizer_scheduler(args, device, train_loader, test_loader, crite
 
         # setup new optimizer for fine-tuning
         optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
-        scheduler = MultiStepLR(
-            optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1)
-
+        scheduler = MultiStepLR(optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1)
+
     print('Pretrained model acc:', best_acc)
     return model, optimizer, scheduler

@@ -253,7 +262,6 @@ def main(args):
     mask_path = os.path.join(args.experiment_data_dir, 'mask_{}_{}_{}.pth'.format(
         args.model, args.dataset, args.pruner))
 
-
     pruner = get_pruner(model, args.pruner, device, optimizer, args.dependency_aware)
     model = pruner.compress()
 
@@ -284,7 +292,7 @@ def main(args):
         args.pretrained_model_dir = model_path
         model, _, _ = get_model_optimizer_scheduler(args, device, train_loader, test_loader, criterion)
         model.eval()
-
+
         apply_compression_results(model, mask_path, device)
 
         # test model speed
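
apply_compression_results reloads the saved masks onto a freshly constructed model before the speed test. Roughly, the mask file maps module names to binary tensors that are multiplied into the weights; the sketch below is an approximation of that idea under an assumed file layout, not the NNI source:

    import torch

    def apply_masks_sketch(model, mask_path, device):
        # Assumed layout: {module_name: {'weight': 0/1 tensor, ...}, ...}
        masks = torch.load(mask_path, map_location=device)
        for name, module in model.named_modules():
            if name in masks and hasattr(module, 'weight'):
                module.weight.data.mul_(masks[name]['weight'])
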
@@ -316,7 +324,7 @@ def main(args):
     parser.add_argument('--data-dir', type=str, default='./data/',
                         help='dataset directory')
     parser.add_argument('--model', type=str, default='vgg16',
-                        choices=['LeNet', 'vgg16' ,'vgg19', 'resnet18'],
+                        choices=['lenet', 'vgg16', 'vgg19', 'resnet18'],
                         help='model to use')
     parser.add_argument('--pretrained-model-dir', type=str, default=None,
                         help='path to pretrained model')
@@ -344,27 +352,27 @@ def main(args):
                         help='toggle dependency aware mode')
     parser.add_argument('--pruner', type=str, default='l1filter',
                         choices=['level', 'l1filter', 'l2filter', 'slim', 'agp',
-                                 'fpgm', 'apoz'],
+                                 'fpgm', 'mean_activation', 'apoz'],
                         help='pruner to use')
 
     # fine-tuning
     parser.add_argument('--fine-tune-epochs', type=int, default=160,
                         help='epochs to fine tune')
 
     # speed-up
     parser.add_argument('--speed-up', action='store_true', default=False,
                         help='whether to speed-up the pruned model')
 
-    parser.add_argument('--nni', action='store_true', default=False,
+    parser.add_argument('--nni', action='store_true', default=False,
                         help="whether to tune the pruners using NNI tuners")
 
     args = parser.parse_args()
 
     if args.nni:
-        params = nni.get_next_parameter()
-        print(params)
-        args.sparsity = params['sparsity']
-        args.pruner = params['pruner']
-        args.model = params['pruner']
+        params = nni.get_next_parameter()
+        print(params)
+        args.sparsity = params['sparsity']
+        args.pruner = params['pruner']
+        args.model = params['pruner']
 
     main(args)
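
The --nni branch expects a tuner to supply 'sparsity' and 'pruner'; note that, as merged, args.model is also set from params['pruner']. A hypothetical search-space definition that would feed nni.get_next_parameter() (NNI's _type/_value convention is real; the key names match the lookups above, and the concrete choices are illustrative):

    # Contents of a hypothetical search_space.json, shown here as a Python dict.
    search_space = {
        "sparsity": {"_type": "choice", "_value": [0.25, 0.5, 0.75]},
        "pruner": {"_type": "choice", "_value": ["l1filter", "l2filter", "fpgm"]},
    }
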
2 changes: 1 addition & 1 deletion examples/model_compress/pruning/model_speedup.py
@@ -18,7 +18,7 @@
     'apoz': {
         'model_name': 'lenet',
         'input_shape': [64, 1, 28, 28],
-        'masks_file': './experiment_data/mask_lenet_mnist_apoz.pth'
+        'masks_file': './experiment_data/mask_vgg16_cifar10_apoz.pth'
     },
     'l1filter': {
         'model_name': 'vgg16',
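
The new mask path follows the naming scheme used in basic_pruners_torch.py above, mask_{model}_{dataset}_{pruner}.pth, so it now matches the vgg16/cifar10 apoz run that the updated test script performs:

    # Filename produced by the mask_path format string in basic_pruners_torch.py:
    'mask_{}_{}_{}.pth'.format('vgg16', 'cifar10', 'apoz')  # 'mask_vgg16_cifar10_apoz.pth'
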
4 changes: 2 additions & 2 deletions pipelines/full-test-linux.yml
@@ -29,8 +29,8 @@ jobs:
   - script: |
       set -e
       python3 -m pip install scikit-learn==0.23.2
-      python3 -m pip install torchvision==0.4.2
-      python3 -m pip install torch==1.3.1
+      python3 -m pip install torchvision==0.6.1
+      python3 -m pip install torch==1.5.1
       python3 -m pip install keras==2.1.6
       python3 -m pip install tensorflow==2.3.1 tensorflow-estimator==2.3.0
       python3 -m pip install thop
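
The version bump keeps the pair compatible: torchvision 0.6.1 is built against torch 1.5.x, while the old 0.4.x wheels target torch 1.3.x. A tiny sanity check a pipeline step could run (illustrative, not part of this PR):

    import torch, torchvision
    assert torch.__version__.startswith("1.5"), torch.__version__
    assert torchvision.__version__.startswith("0.6"), torchvision.__version__
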
2 changes: 1 addition & 1 deletion pipelines/full-test-windows.yml
@@ -27,7 +27,7 @@ jobs:
   - script: |
       python -m pip install scikit-learn==0.23.2
       python -m pip install keras==2.1.6
-      python -m pip install torchvision===0.4.1 torch===1.3.1 -f https://download.pytorch.org/whl/torch_stable.html
+      python -m pip install torchvision===0.6.1 torch===1.5.1 -f https://download.pytorch.org/whl/torch_stable.html
       python -m pip install tensorflow==2.3.1 tensorflow-estimator==2.3.0
     displayName: Install extra dependencies

37 changes: 20 additions & 17 deletions test/scripts/model_compression.sh
@@ -6,30 +6,33 @@ echo ""
echo "===========================Testing: pruning and speedup==========================="
cd ${CWD}/../examples/model_compress

for name in fpgm slim l1filter apoz
do
echo "testing $name pruning and speedup..."
python3 model_prune_torch.py --pruner_name $name --pretrain_epochs 1 --prune_epochs 1
python3 model_speedup.py --example_name $name
done

for name in level mean_activation
do
echo "testing $name pruning..."
python3 model_prune_torch.py --pruner_name $name --pretrain_epochs 1 --prune_epochs 1
done
echo "testing fpgm pruning and speedup..."
python3 pruning/basic_pruners_torch.py --pruner fpgm --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
python3 pruning/model_speedup.py --example_name fpgm

echo "testing slim pruning and speedup..."
python3 pruning/basic_pruners_torch.py --pruner slim --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg19 --dataset cifar10 --sparsity 0.7
python3 pruning/model_speedup.py --example_name slim

echo "testing l1filter pruning and speedup..."
python3 pruning/basic_pruners_torch.py --pruner l1filter --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
python3 pruning/model_speedup.py --example_name l1filter

echo "testing apoz pruning and speedup..."
python3 pruning/basic_pruners_torch.py --pruner apoz --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
python3 pruning/model_speedup.py --example_name apoz

echo 'testing level pruner pruning'
python3 model_prune_torch.py --pruner_name level --pretrain_epochs 1 --prune_epochs 1
python3 pruning/basic_pruners_torch.py --pruner level --pretrain-epochs 1 --fine-tune-epochs 1 --model lenet --dataset mnist

echo 'testing agp pruning'
python3 model_prune_torch.py --pruner_name agp --pretrain_epochs 1 --prune_epochs 2
python3 pruning/basic_pruners_torch.py --pruner agp --pretrain-epochs 1 --fine-tune-epochs 1 --model lenet --dataset mnist

echo 'testing mean_activation pruning'
python3 model_prune_torch.py --pruner_name mean_activation --pretrain_epochs 1 --prune_epochs 1
python3 pruning/basic_pruners_torch.py --pruner mean_activation --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10

echo "testing lottery ticket pruning..."
python3 lottery_torch_mnist_fc.py --train_epochs 1
python3 pruning/lottery_torch_mnist_fc.py --train_epochs 1

echo ""
echo "===========================Testing: quantizers==========================="
@@ -43,4 +46,4 @@ echo "===========================Testing: quantizers==========================="
#echo "testing BNN quantizer..."
#python3 BNN_quantizer_cifar10.py

rm -rf ./checkpoints/*
rm -rf ./experiment_data/*