Skip to content
This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

update examples #2082

Merged
merged 3 commits into from
Feb 23, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion examples/model_compress/APoZ_torch_cifar10.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def test(model, device, test_loader):

def main():
torch.manual_seed(0)
device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
Expand Down
2 changes: 1 addition & 1 deletion examples/model_compress/BNN_quantizer_cifar10.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def adjust_learning_rate(optimizer, epoch):

def main():
torch.manual_seed(0)
device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
Expand Down
89 changes: 89 additions & 0 deletions examples/model_compress/DoReFaQuantizer_torch_mnist.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from nni.compression.torch import DoReFaQuantizer


class Mnist(torch.nn.Module):
    """Small LeNet-style CNN for MNIST with ReLU6 activations.

    Expects input of shape (N, 1, 28, 28) and returns (N, 10)
    log-probabilities via ``F.log_softmax``.
    """

    def __init__(self):
        super().__init__()
        # Two conv stages followed by two fully-connected layers.
        self.conv1 = torch.nn.Conv2d(1, 20, 5, 1)
        self.conv2 = torch.nn.Conv2d(20, 50, 5, 1)
        self.fc1 = torch.nn.Linear(4 * 4 * 50, 500)
        self.fc2 = torch.nn.Linear(500, 10)
        # Distinct ReLU6 modules (rather than F.relu6) so a compressor
        # can address each activation layer individually by name.
        self.relu1 = torch.nn.ReLU6()
        self.relu2 = torch.nn.ReLU6()
        self.relu3 = torch.nn.ReLU6()

    def forward(self, x):
        """Run the forward pass and return per-class log-probabilities."""
        out = self.relu1(self.conv1(x))
        out = F.max_pool2d(out, 2, 2)
        out = self.relu2(self.conv2(out))
        out = F.max_pool2d(out, 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        out = self.relu3(self.fc1(out))
        return F.log_softmax(self.fc2(out), dim=1)


def train(model, quantizer, device, train_loader, optimizer):
    """Train ``model`` for one epoch over ``train_loader``.

    ``quantizer`` is accepted for API symmetry with the other examples;
    it is not called directly here because its hooks are already
    installed on ``model`` by ``compress()``.
    """
    model.train()
    n_batches = len(train_loader)
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
        # Report progress every 100 batches.
        if batch_idx % 100 == 0:
            print('{:2.0f}% Loss {}'.format(100 * batch_idx / n_batches, loss.item()))

def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)

print('Loss: {} Accuracy: {}%)\n'.format(
test_loss, 100 * correct / len(test_loader.dataset)))

def main():
    """End-to-end DoReFa weight-quantization example on MNIST."""
    torch.manual_seed(0)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True, transform=transform),
        batch_size=64, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=False, transform=transform),
        batch_size=1000, shuffle=True)

    model = Mnist().to(device)

    # Quantize the weights of every Conv2d and Linear layer to 8 bits.
    configure_list = [{
        'quant_types': ['weight'],
        # A plain `int` would also work here since all `quant_types`
        # share the same bit width.
        'quant_bits': {'weight': 8},
        'op_types': ['Conv2d', 'Linear'],
    }]
    quantizer = DoReFaQuantizer(model, configure_list)
    quantizer.compress()

    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.5)
    for epoch in range(10):
        print('# Epoch {} #'.format(epoch))
        train(model, quantizer, device, train_loader, optimizer)
        test(model, device, test_loader)


if __name__ == '__main__':
    main()
2 changes: 1 addition & 1 deletion examples/model_compress/L1_torch_cifar10.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def test(model, device, test_loader):

def main():
torch.manual_seed(0)
device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
Expand Down
8 changes: 5 additions & 3 deletions examples/model_compress/MeanActivation_torch_cifar10.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import math
import os
import argparse
import torch
import torch.nn as nn
Expand Down Expand Up @@ -48,7 +49,7 @@ def main():

args = parser.parse_args()
torch.manual_seed(0)
device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
Expand Down Expand Up @@ -79,10 +80,11 @@ def main():
test(model, device, test_loader)
lr_scheduler.step(epoch)
torch.save(model.state_dict(), 'vgg16_cifar10.pth')

else:
assert os.path.isfile('vgg16_cifar10.pth'), "can not find checkpoint 'vgg16_cifar10.pth'"
model.load_state_dict(torch.load('vgg16_cifar10.pth'))
# Test base model accuracy
print('=' * 10 + 'Test on the original model' + '=' * 10)
model.load_state_dict(torch.load('vgg16_cifar10.pth'))
test(model, device, test_loader)
# top1 = 93.51%

Expand Down
4 changes: 2 additions & 2 deletions examples/model_compress/QAT_torch_quantizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def test(model, device, test_loader):

def main():
torch.manual_seed(0)
device = torch.device('cpu')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_loader = torch.utils.data.DataLoader(
Expand All @@ -67,7 +67,6 @@ def main():
batch_size=1000, shuffle=True)

model = Mnist()

'''you can change this to DoReFaQuantizer to implement it
DoReFaQuantizer(configure_list).compress(model)
'''
Expand All @@ -86,6 +85,7 @@ def main():
quantizer = QAT_Quantizer(model, configure_list)
quantizer.compress()

model.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
for epoch in range(10):
print('# Epoch {} #'.format(epoch))
Expand Down
5 changes: 3 additions & 2 deletions examples/model_compress/fpgm_torch_mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def test(model, device, test_loader):

def main():
torch.manual_seed(0)
device = torch.device('cpu')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_loader = torch.utils.data.DataLoader(
Expand All @@ -83,6 +83,7 @@ def main():
batch_size=1000, shuffle=True)

model = Mnist()
model.to(device)
model.print_conv_filter_sparsity()

configure_list = [{
Expand All @@ -92,7 +93,7 @@ def main():

pruner = FPGMPruner(model, configure_list)
pruner.compress()

model.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
for epoch in range(10):
pruner.update_epoch(epoch)
Expand Down
2 changes: 1 addition & 1 deletion examples/model_compress/main_torch_pruner.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def test(model, device, test_loader):

def main():
torch.manual_seed(0)
device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_loader = torch.utils.data.DataLoader(
Expand Down
2 changes: 1 addition & 1 deletion examples/model_compress/pruning_kd.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def test(model, device, test_loader):

def main():
torch.manual_seed(0)
device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
Expand Down
8 changes: 5 additions & 3 deletions examples/model_compress/slim_torch_cifar10.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import math
import os
import argparse
import torch
import torch.nn as nn
Expand Down Expand Up @@ -57,7 +58,7 @@ def main():
args = parser.parse_args()

torch.manual_seed(0)
device = torch.device('cuda')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
Expand Down Expand Up @@ -90,10 +91,11 @@ def main():
train(model, device, train_loader, optimizer, True)
test(model, device, test_loader)
torch.save(model.state_dict(), 'vgg19_cifar10.pth')

else:
assert os.path.isfile('vgg19_cifar10.pth'), "can not find checkpoint 'vgg19_cifar10.pth'"
model.load_state_dict(torch.load('vgg19_cifar10.pth'))
# Test base model accuracy
print('=' * 10 + 'Test the original model' + '=' * 10)
model.load_state_dict(torch.load('vgg19_cifar10.pth'))
test(model, device, test_loader)
# top1 = 93.60%

Expand Down