From d6b61e2f8d486bc973072c434482d0fa0d40b9a7 Mon Sep 17 00:00:00 2001
From: liuzhe-lz <40699903+liuzhe-lz@users.noreply.github.com>
Date: Mon, 14 Oct 2019 11:21:31 +0800
Subject: [PATCH] Resolve comments in PR 1571 (#1590)

* Resolve comments in PR 1571
* try to pass ut
* fix typo
* format doc-string
* use tensorflow.compat.v1
* Revert "use tensorflow.compat.v1"

This reverts commit 97a4ed923677c6dfd545fd654c55c424cf490a19.
---
 docs/en_US/Compressor/Overview.md             |  6 +--
 docs/en_US/Compressor/Pruner.md               |  2 +-
 examples/model_compress/main_tf_pruner.py     |  4 +-
 examples/model_compress/main_tf_quantizer.py  |  4 +-
 examples/model_compress/main_torch_pruner.py  |  6 +--
 .../model_compress/main_torch_quantizer.py    |  5 +-
 .../compression/tensorflow/builtin_pruners.py | 26 +++++-----
 .../tensorflow/builtin_quantizers.py          | 17 +++---
 .../nni/compression/tensorflow/compressor.py  | 52 ++++++++-----------
 .../nni/compression/torch/builtin_pruners.py  | 37 +++++--------
 .../compression/torch/builtin_quantizers.py   | 17 +++---
 .../pynni/nni/compression/torch/compressor.py | 33 +++++-------
 src/sdk/pynni/tests/test_compressor.py        |  6 +--
 13 files changed, 90 insertions(+), 125 deletions(-)

diff --git a/docs/en_US/Compressor/Overview.md b/docs/en_US/Compressor/Overview.md
index 96453caad5..e3b0fb7c13 100644
--- a/docs/en_US/Compressor/Overview.md
+++ b/docs/en_US/Compressor/Overview.md
@@ -7,7 +7,7 @@ We have provided two naive compression algorithms and four popular ones for user
 |Name|Brief Introduction of Algorithm|
 |---|---|
 | [Level Pruner](./Pruner.md#level-pruner) | Pruning the specified ratio on each weight based on absolute values of weights |
-| [AGP Pruner](./Pruner.md#agp-pruner) | To prune, or not to prune: exploring the efficacy of pruning for model compression. [Reference Paper](https://arxiv.org/abs/1710.01878)|
+| [AGP Pruner](./Pruner.md#agp-pruner) | Automated gradual pruning (To prune, or not to prune: exploring the efficacy of pruning for model compression) [Reference Paper](https://arxiv.org/abs/1710.01878)|
 | [Sensitivity Pruner](./Pruner.md#sensitivity-pruner) | Learning both Weights and Connections for Efficient Neural Networks. [Reference Paper](https://arxiv.org/abs/1506.02626)|
 | [Naive Quantizer](./Quantizer.md#naive-quantizer) | Quantize weights to default 8 bits |
 | [QAT Quantizer](./Quantizer.md#qat-quantizer) | Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. [Reference Paper](http://openaccess.thecvf.com/content_cvpr_2018/papers/Jacob_Quantization_and_Training_CVPR_2018_paper.pdf)|
@@ -72,7 +72,7 @@ It means following the algorithm's default setting for compressed operations wit
 
 ### Other APIs
 
-Some compression algorithms use epochs to control the progress of compression, and some algorithms need to do something after every minibatch. Therefore, we provide another two APIs for users to invoke. One is `update_epoch`, you can use it as follows:
+Some compression algorithms use epochs to control the progress of compression (e.g. [AGP](./Pruner.md#agp-pruner)), and some algorithms need to do something after every minibatch. Therefore, we provide another two APIs for users to invoke. One is `update_epoch`, which you can use as follows:
 
 Tensorflow code
 ```python
@@ -138,7 +138,7 @@ Some algorithms may want global information for generating masks, for example, a
 
 The interface for customizing quantization algorithm is similar to that of pruning algorithms. The only difference is that `calc_mask` is replaced with `quantize_weight`.
 `quantize_weight` directly returns the quantized weights rather than mask, because for quantization the quantized weights cannot be obtained by applying mask.
 
-```
+```python
 # This is writing a Quantizer in tensorflow.
 # For writing a Quantizer in PyTorch, you can simply replace
 # nni.compression.tensorflow.Quantizer with
diff --git a/docs/en_US/Compressor/Pruner.md b/docs/en_US/Compressor/Pruner.md
index 59db5b16c8..c6c74efd8a 100644
--- a/docs/en_US/Compressor/Pruner.md
+++ b/docs/en_US/Compressor/Pruner.md
@@ -38,7 +38,7 @@ In [To prune, or not to prune: exploring the efficacy of pruning for model compr
 >The binary weight masks are updated every ∆t steps as the network is trained to gradually increase the sparsity of the network while allowing the network training steps to recover from any pruning-induced loss in accuracy. In our experience, varying the pruning frequency ∆t between 100 and 1000 training steps had a negligible impact on the final model quality. Once the model achieves the target sparsity sf , the weight masks are no longer updated. The intuition behind this sparsity function in equation
 
 ### Usage
-You can prune all weight from %0 to 80% sparsity in 10 epoch with the code below.
+You can prune all weights from 0% to 80% sparsity over 10 epochs with the code below.
 
 First, you should import pruner and add mask to model.
diff --git a/examples/model_compress/main_tf_pruner.py b/examples/model_compress/main_tf_pruner.py
index 3c6acc3d5c..b00a01925f 100644
--- a/examples/model_compress/main_tf_pruner.py
+++ b/examples/model_compress/main_tf_pruner.py
@@ -127,4 +127,6 @@ def main():
         })
     print('final result is', test_acc)
 
-main()
+
+if __name__ == '__main__':
+    main()
diff --git a/examples/model_compress/main_tf_quantizer.py b/examples/model_compress/main_tf_quantizer.py
index cc6e0b8fc9..c1e6214ebf 100644
--- a/examples/model_compress/main_tf_quantizer.py
+++ b/examples/model_compress/main_tf_quantizer.py
@@ -114,4 +114,6 @@ def main():
         })
     print('final result is', test_acc)
 
-main()
+
+if __name__ == '__main__':
+    main()
diff --git a/examples/model_compress/main_torch_pruner.py b/examples/model_compress/main_torch_pruner.py
index 39ceb378a1..b8474a8a00 100644
--- a/examples/model_compress/main_torch_pruner.py
+++ b/examples/model_compress/main_torch_pruner.py
@@ -89,7 +89,7 @@ def main():
         test(model, device, test_loader)
         pruner.update_epoch(epoch)
 
-
-main()
+
+if __name__ == '__main__':
+    main()
diff --git a/examples/model_compress/main_torch_quantizer.py b/examples/model_compress/main_torch_quantizer.py
index 27e3dcfac0..adbfab0582 100644
--- a/examples/model_compress/main_torch_quantizer.py
+++ b/examples/model_compress/main_torch_quantizer.py
@@ -81,7 +81,6 @@ def main():
         train(model, device, train_loader, optimizer)
         test(model, device, test_loader)
 
-
-main()
+if __name__ == '__main__':
+    main()
diff --git a/src/sdk/pynni/nni/compression/tensorflow/builtin_pruners.py b/src/sdk/pynni/nni/compression/tensorflow/builtin_pruners.py
index b014d8bc99..c2b7e4453d 100644
--- a/src/sdk/pynni/nni/compression/tensorflow/builtin_pruners.py
+++ b/src/sdk/pynni/nni/compression/tensorflow/builtin_pruners.py
@@ -10,8 +10,8 @@ class LevelPruner(Pruner):
     def __init__(self, config_list):
         """
-        Configure Args:
-            sparsity
+        config_list: supported keys:
+            - sparsity
         """
         super().__init__(config_list)
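As a quick orientation for reviewers, here is a usage sketch for a `config_list` like the one documented above. It is illustrative only, not part of this patch, and assumes `LevelPruner` is exported at the package level, as the `tf_compressor` import in `test_compressor.py` suggests:

```python
# Illustrative only: apply LevelPruner to the current TensorFlow graph.
# The 'sparsity' key follows the docstring above; other keys are resolved
# by Compressor._select_config().
import tensorflow as tf
from nni.compression.tensorflow import LevelPruner

config_list = [{'sparsity': 0.5}]   # prune 50% of each weight tensor
pruner = LevelPruner(config_list)
pruner.compress_default_graph()     # or: pruner(graph) for a specific graph
```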
@@ -21,8 +21,7 @@ def calc_mask(self, weight, config, **kwargs):
 
 
 class AGP_Pruner(Pruner):
-    """
-    An automated gradual pruning algorithm that prunes the smallest magnitude
+    """An automated gradual pruning algorithm that prunes the smallest magnitude
     weights to achieve a preset level of network sparsity.
 
     Michael Zhu and Suyog Gupta, "To prune, or not to prune: exploring the
@@ -32,12 +31,12 @@ class AGP_Pruner(Pruner):
     """
     def __init__(self, config_list):
         """
-        Configure Args
-            initial_sparsity:
-            final_sparsity: you should make sure initial_sparsity <= final_sparsity
-            start_epoch: start epoch numer begin update mask
-            end_epoch: end epoch number stop update mask
-            frequency: if you want update every 2 epoch, you can set it 2
+        config_list: supported keys:
+            - initial_sparsity
+            - final_sparsity: you should make sure initial_sparsity <= final_sparsity
+            - start_epoch: epoch number at which mask updates begin
+            - end_epoch: epoch number at which mask updates stop
+            - frequency: how often the mask is updated, e.g. set it to 2 to update every 2 epochs
         """
         super().__init__(config_list)
         self.now_epoch = tf.Variable(0)
@@ -77,8 +76,7 @@ def update_epoch(self, epoch, sess):
 
 
 class SensitivityPruner(Pruner):
-    """
-    Use algorithm from "Learning both Weights and Connections for Efficient Neural Networks"
+    """Use the algorithm from "Learning both Weights and Connections for Efficient Neural Networks"
     https://arxiv.org/pdf/1506.02626v3.pdf
 
     I.e.: "The pruning threshold is chosen as a quality parameter multiplied
@@ -86,8 +84,8 @@ class SensitivityPruner(Pruner):
     """
     def __init__(self, config_list):
         """
-        Configure Args:
-            sparsity: chosen pruning sparsity
+        config_list: supported keys:
+            - sparsity: chosen pruning sparsity
         """
         super().__init__(config_list)
         self.layer_mask = {}
diff --git a/src/sdk/pynni/nni/compression/tensorflow/builtin_quantizers.py b/src/sdk/pynni/nni/compression/tensorflow/builtin_quantizers.py
index 3dde1f2f1c..a7ed2b9338 100644
--- a/src/sdk/pynni/nni/compression/tensorflow/builtin_quantizers.py
+++ b/src/sdk/pynni/nni/compression/tensorflow/builtin_quantizers.py
@@ -8,8 +8,7 @@
 
 
 class NaiveQuantizer(Quantizer):
-    """
-    quantize weight to 8 bits
+    """Quantize weight to 8 bits
     """
     def __init__(self, config_list):
         super().__init__(config_list)
@@ -24,15 +23,14 @@ def quantize_weight(self, weight, config, op_name, **kwargs):
 
 
 class QAT_Quantizer(Quantizer):
-    """
-    Quantizer using the DoReFa scheme, as defined in:
+    """Quantizer using the QAT scheme, as defined in:
     Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference
     http://openaccess.thecvf.com/content_cvpr_2018/papers/Jacob_Quantization_and_Training_CVPR_2018_paper.pdf
     """
     def __init__(self, config_list):
         """
-        Configure Args:
-            q_bits
+        config_list: supported keys:
+            - q_bits
         """
         super().__init__(config_list)
@@ -50,15 +48,14 @@ def quantize_weight(self, weight, config, **kwargs):
 
 
 class DoReFaQuantizer(Quantizer):
-    """
-    Quantizer using the DoReFa scheme, as defined in:
+    """Quantizer using the DoReFa scheme, as defined in:
     Zhou et al., DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients
     (https://arxiv.org/abs/1606.06160)
     """
     def __init__(self, config_list):
         """
-        Configure Args:
-            q_bits
+        config_list: supported keys:
+            - q_bits
         """
         super().__init__(config_list)
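Since `update_epoch(epoch, sess)` appears in the pruner API above, here is a hedged sketch of how a training loop might drive the AGP schedule (TF 1.x; `run_one_epoch` is a hypothetical user-defined function, and `pruner` is an `AGP_Pruner` already applied to the graph):

```python
# Illustrative only: advance AGP_Pruner's sparsity schedule epoch by epoch.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(10):
        run_one_epoch(sess)               # hypothetical: train for one epoch
        pruner.update_epoch(epoch, sess)  # recompute the target sparsity
```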
diff --git a/src/sdk/pynni/nni/compression/tensorflow/compressor.py b/src/sdk/pynni/nni/compression/tensorflow/compressor.py
index 3e8b638054..3c0cf04d1c 100644
--- a/src/sdk/pynni/nni/compression/tensorflow/compressor.py
+++ b/src/sdk/pynni/nni/compression/tensorflow/compressor.py
@@ -13,20 +13,21 @@ def __init__(self, op):
 
 
 class Compressor:
-    """
-    Abstract base TensorFlow compressor
-    """
+    """Abstract base TensorFlow compressor"""
+
     def __init__(self, config_list):
         self._bound_model = None
         self._config_list = config_list
 
     def __call__(self, model):
+        """Compress given graph with algorithm implemented by subclass.
+        The graph will be edited and returned.
+        """
         self.compress(model)
         return model
 
     def compress(self, model):
-        """
-        Compress given graph with algorithm implemented by subclass.
+        """Compress given graph with algorithm implemented by subclass.
         This will edit the graph.
         """
         assert self._bound_model is None, "Each NNI compressor instance can only compress one model"
@@ -39,30 +40,26 @@ def compress(self, model):
         self._instrument_layer(layer, config)
 
     def compress_default_graph(self):
-        """
-        Compress the default graph with algorithm implemented by subclass.
-        This will edit the graph.
+        """Compress the default graph with algorithm implemented by subclass.
+        This will edit the default graph.
         """
         self.compress(tf.get_default_graph())
 
     def bind_model(self, model):
-        """
-        This method is called when a model is bound to the compressor.
-        Users can optionally overload this method to do model-specific initialization.
+        """This method is called when a model is bound to the compressor.
+        Compressors can optionally overload this method to do model-specific initialization.
         It is guaranteed that only one model will be bound to each compressor instance.
         """
         pass
 
     def update_epoch(self, epoch, sess):
-        """
-        if user want to update mask every epoch, user can override this method
+        """If users want to update the mask every epoch, they can override this method
         """
         pass
 
     def step(self, sess):
-        """
-        if user want to update mask every step, user can override this method
+        """If users want to update the mask every step, they can override this method
         """
         pass
 
@@ -87,15 +84,13 @@ def _select_config(self, layer):
 
 
 class Pruner(Compressor):
-    """
-    Abstract base TensorFlow pruner
-    """
+    """Abstract base TensorFlow pruner"""
+
     def __init__(self, config_list):
         super().__init__(config_list)
 
     def calc_mask(self, weight, config, op, op_type, op_name):
-        """
-        Pruners should overload this method to provide mask for weight tensors.
+        """Pruners should overload this method to provide a mask for weight tensors.
         The mask must have the same shape and type comparing to the weight.
         It will be applied with `multiply()` operation.
         This method works as a subgraph which will be inserted into the bound model.
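To make the `calc_mask` contract above concrete, a minimal hypothetical subclass could look like the following. It is not part of this patch; it assumes the `Pruner` base class is importable from the package, as the Overview doc's customization example does for `Quantizer`, and the `'threshold'` key is invented for illustration:

```python
# Hypothetical pruner illustrating the calc_mask() contract above: the mask
# has the same shape and dtype as the weight and is applied via multiply().
import tensorflow as tf
from nni.compression.tensorflow import Pruner

class MyThresholdPruner(Pruner):
    def calc_mask(self, weight, config, **kwargs):
        threshold = config.get('threshold', 0.01)   # 'threshold' is an assumed key
        return tf.stop_gradient(tf.cast(tf.abs(weight) > threshold, weight.dtype))
```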
@@ -103,13 +98,11 @@ def calc_mask(self, weight, config, op, op_type, op_name):
         raise NotImplementedError("Pruners must overload calc_mask()")
 
     def _instrument_layer(self, layer, config):
-        """
-        it seems the graph editor can only swap edges of nodes or remove all edges from a node
-        it cannot remove one edge from a node, nor can it assign a new edge to a node
-        we assume there is a proxy operation between the weight and the Conv2D layer
-        this is true as long as the weight is `tf.Value`
-        not sure what will happen if the weight is calculated from other operations
-        """
+        # it seems the graph editor can only swap edges of nodes or remove all edges from a node
+        # it cannot remove one edge from a node, nor can it assign a new edge to a node
+        # we assume there is a proxy operation between the weight and the Conv2D layer
+        # this is true as long as the weight is a `tf.Variable`
+        # not sure what will happen if the weight is calculated from other operations
         weight_index = _detect_weight_index(layer)
         if weight_index is None:
             _logger.warning('Failed to detect weight for layer {}'.format(layer.name))
@@ -122,9 +115,8 @@ def _instrument_layer(self, layer, config):
 
 
 class Quantizer(Compressor):
-    """
-    Abstract base TensorFlow quantizer
-    """
+    """Abstract base TensorFlow quantizer"""
+
     def __init__(self, config_list):
         super().__init__(config_list)
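Both AGP pruners in this patch implement the cubic sparsity schedule from Zhu & Gupta's paper. As a reading aid before the PyTorch implementation below, here is an illustrative standalone version of that schedule; names are mine and edge cases such as a zero span are ignored, so treat it as a sketch rather than the patch's exact code:

```python
# Illustrative AGP schedule: s_t = s_f + (s_i - s_f) * (1 - progress)**3,
# where progress runs from 0 at start_epoch to 1 at end_epoch.
def agp_target_sparsity(epoch, initial_sparsity, final_sparsity,
                        start_epoch, end_epoch, frequency):
    if epoch <= start_epoch:
        return initial_sparsity
    if epoch >= end_epoch:
        return final_sparsity
    span = ((end_epoch - start_epoch - 1) // frequency) * frequency
    progress = (epoch - start_epoch) / span
    return final_sparsity + (initial_sparsity - final_sparsity) * (1.0 - progress) ** 3
```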
diff --git a/src/sdk/pynni/nni/compression/torch/builtin_pruners.py b/src/sdk/pynni/nni/compression/torch/builtin_pruners.py
index 7309ce1eb3..858db63a94 100644
--- a/src/sdk/pynni/nni/compression/torch/builtin_pruners.py
+++ b/src/sdk/pynni/nni/compression/torch/builtin_pruners.py
@@ -12,19 +12,8 @@ class LevelPruner(Pruner):
     """
     def __init__(self, config_list):
         """
-        we suggest user to use json configure list, like [{},{}...], to set configure
-        format :
-        [
-            {
-                'sparsity': 0,
-                'support_type': 'default'
-            },
-            {
-                'sparsity': 50,
-                'support_op': conv1
-            }
-        ]
-        if you want input multiple configure from file, you'd better use load_configure_file(path) to load
+        config_list: supported keys:
+            - sparsity
         """
         super().__init__(config_list)
@@ -38,8 +27,7 @@ def calc_mask(self, weight, config, **kwargs):
 
 
 class AGP_Pruner(Pruner):
-    """
-    An automated gradual pruning algorithm that prunes the smallest magnitude
+    """An automated gradual pruning algorithm that prunes the smallest magnitude
     weights to achieve a preset level of network sparsity.
 
     Michael Zhu and Suyog Gupta, "To prune, or not to prune: exploring the
@@ -49,12 +37,12 @@ class AGP_Pruner(Pruner):
     """
     def __init__(self, config_list):
         """
-        Configure Args
-            initial_sparsity
-            final_sparsity: you should make sure initial_sparsity <= final_sparsity
-            start_epoch: start epoch numer begin update mask
-            end_epoch: end epoch number stop update mask, you should make sure start_epoch <= end_epoch
-            frequency: if you want update every 2 epoch, you can set it 2
+        config_list: supported keys:
+            - initial_sparsity
+            - final_sparsity: you should make sure initial_sparsity <= final_sparsity
+            - start_epoch: epoch number at which mask updates begin
+            - end_epoch: epoch number at which mask updates stop, you should make sure start_epoch <= end_epoch
+            - frequency: how often the mask is updated, e.g. set it to 2 to update every 2 epochs
         """
         super().__init__(config_list)
         self.mask_list = {}
@@ -99,8 +87,7 @@ def update_epoch(self, epoch):
 
 
 class SensitivityPruner(Pruner):
-    """
-    Use algorithm from "Learning both Weights and Connections for Efficient Neural Networks"
+    """Use the algorithm from "Learning both Weights and Connections for Efficient Neural Networks"
     https://arxiv.org/pdf/1506.02626v3.pdf
 
     I.e.: "The pruning threshold is chosen as a quality parameter multiplied
@@ -108,8 +95,8 @@ class SensitivityPruner(Pruner):
     """
    def __init__(self, config_list):
        """
-        configure Args:
-            sparsity: chosen pruning sparsity
+        config_list: supported keys:
+            - sparsity: chosen pruning sparsity
         """
         super().__init__(config_list)
         self.mask_list = {}
diff --git a/src/sdk/pynni/nni/compression/torch/builtin_quantizers.py b/src/sdk/pynni/nni/compression/torch/builtin_quantizers.py
index 9f2b9ccd95..f5b4b5223a 100644
--- a/src/sdk/pynni/nni/compression/torch/builtin_quantizers.py
+++ b/src/sdk/pynni/nni/compression/torch/builtin_quantizers.py
@@ -8,8 +8,7 @@
 
 
 class NaiveQuantizer(Quantizer):
-    """
-    quantize weight to 8 bits
+    """Quantize weight to 8 bits
     """
     def __init__(self, config_list):
         super().__init__(config_list)
@@ -24,15 +23,14 @@ def quantize_weight(self, weight, config, op_name, **kwargs):
 
 
 class QAT_Quantizer(Quantizer):
-    """
-    Quantizer using the DoReFa scheme, as defined in:
+    """Quantizer using the QAT scheme, as defined in:
     Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference
     http://openaccess.thecvf.com/content_cvpr_2018/papers/Jacob_Quantization_and_Training_CVPR_2018_paper.pdf
     """
     def __init__(self, config_list):
         """
-        Configure Args:
-            q_bits
+        config_list: supported keys:
+            - q_bits
         """
         super().__init__(config_list)
@@ -51,15 +49,14 @@ def quantize_weight(self, weight, config, **kwargs):
 
 
 class DoReFaQuantizer(Quantizer):
-    """
-    Quantizer using the DoReFa scheme, as defined in:
+    """Quantizer using the DoReFa scheme, as defined in:
     Zhou et al., DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients
     (https://arxiv.org/abs/1606.06160)
     """
     def __init__(self, config_list):
         """
-        configure Args:
-            q_bits
+        config_list: supported keys:
+            - q_bits
         """
         super().__init__(config_list)
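For symmetry with the pruner example earlier, a usage sketch for the quantizers above. It is illustrative only and assumes a package-level export of `QAT_Quantizer` (as the `torch_compressor` import in the tests suggests) and an existing `model`:

```python
# Illustrative only: quantize a PyTorch model's weights to 8 bits.
# The 'q_bits' key follows the docstrings above.
from nni.compression.torch import QAT_Quantizer

config_list = [{'q_bits': 8}]
quantizer = QAT_Quantizer(config_list)
model = quantizer(model)   # __call__ compresses the model and returns it
```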
diff --git a/src/sdk/pynni/nni/compression/torch/compressor.py b/src/sdk/pynni/nni/compression/torch/compressor.py
index 6282a2138c..5d74a464b0 100644
--- a/src/sdk/pynni/nni/compression/torch/compressor.py
+++ b/src/sdk/pynni/nni/compression/torch/compressor.py
@@ -15,9 +15,8 @@ def __init__(self, name, module):
 
 
 class Compressor:
-    """
-    Abstract base PyTorch compressor
-    """
+    """Abstract base PyTorch compressor"""
+
     def __init__(self, config_list):
         self._bound_model = None
         self._config_list = config_list
@@ -27,8 +26,7 @@ def __call__(self, model):
         return model
 
     def compress(self, model):
-        """
-        Compress the model with algorithm implemented by subclass.
+        """Compress the model with algorithm implemented by subclass.
         The model will be instrumented and user should never edit it after calling this method.
         """
         assert self._bound_model is None, "Each NNI compressor instance can only compress one model"
@@ -42,22 +40,19 @@ def compress(self, model):
 
 
     def bind_model(self, model):
-        """
-        This method is called when a model is bound to the compressor.
+        """This method is called when a model is bound to the compressor.
         Users can optionally overload this method to do model-specific initialization.
         It is guaranteed that only one model will be bound to each compressor instance.
         """
         pass
 
     def update_epoch(self, epoch):
-        """
-        if user want to update model every epoch, user can override this method
+        """If users want to update the model every epoch, they can override this method
         """
         pass
 
     def step(self):
-        """
-        if user want to update model every step, user can override this method
+        """If users want to update the model every step, they can override this method
         """
         pass
 
@@ -82,15 +77,13 @@ def _select_config(self, layer):
 
 
 class Pruner(Compressor):
-    """
-    Abstract base PyTorch pruner
-    """
+    """Abstract base PyTorch pruner"""
+
     def __init__(self, config_list):
         super().__init__(config_list)
 
     def calc_mask(self, weight, config, op, op_type, op_name):
-        """
-        Pruners should overload this method to provide mask for weight tensors.
+        """Pruners should overload this method to provide a mask for weight tensors.
         The mask must have the same shape and type comparing to the weight.
         It will be applied with `mul()` operation.
         This method is effectively hooked to `forward()` method of the model.
@@ -122,9 +115,8 @@ def new_forward(*input):
 
 
 class Quantizer(Compressor):
-    """
-    Base quantizer for pytorch quantizer
-    """
+    """Abstract base PyTorch quantizer"""
+
     def __init__(self, config_list):
         super().__init__(config_list)
 
@@ -133,8 +125,7 @@ def __call__(self, model):
         return model
 
     def quantize_weight(self, weight, config, op, op_type, op_name):
-        """
-        user should know where dequantize goes and implement it in quantize method
+        """Users should know where dequantization goes and implement it in the quantize method;
         we now do not provide dequantize method
         """
         raise NotImplementedError("Quantizer must overload quantize_weight()")
diff --git a/src/sdk/pynni/tests/test_compressor.py b/src/sdk/pynni/tests/test_compressor.py
index 1c6021b0cd..83735a20a2 100644
--- a/src/sdk/pynni/tests/test_compressor.py
+++ b/src/sdk/pynni/tests/test_compressor.py
@@ -1,9 +1,9 @@
 from unittest import TestCase, main
-import nni.compression.tensorflow as tf_compressor
-import nni.compression.torch as torch_compressor
+import tensorflow as tf
 import torch
 import torch.nn.functional as F
-import tensorflow as tf
+import nni.compression.tensorflow as tf_compressor
+import nni.compression.torch as torch_compressor
 
 def weight_variable(shape):
     return tf.Variable(tf.truncated_normal(shape, stddev = 0.1))
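Finally, mirroring the pruner sketch earlier, a minimal hypothetical quantizer against the `quantize_weight` contract above, since custom quantizers return quantized weights directly rather than a mask (`RoundQuantizer` is invented for illustration and is not part of this patch):

```python
# Hypothetical quantizer illustrating the quantize_weight() contract:
# returns quantized weights directly instead of a mask.
import torch
from nni.compression.torch import Quantizer

class RoundQuantizer(Quantizer):
    def quantize_weight(self, weight, config, **kwargs):
        q_bits = config.get('q_bits', 8)
        scale = weight.abs().max() + 1e-8   # avoid division by zero
        levels = 2 ** q_bits - 1
        return torch.round(weight / scale * levels) / levels * scale
```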