From 0a4b7b0bb1b523185bb21a7605bf0f22a7a1c396 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 14 May 2020 01:26:56 +0000 Subject: [PATCH 01/47] Add analysis tools for sensitivity and topology. Signed-off-by: Ningxin --- src/sdk/pynni/nni/analysis_utils/__init__.py | 0 .../analysis_utils/sensitivity/__init__.py | 2 + .../sensitivity/torch/__init__.py | 4 + .../sensitivity/torch/sensitivity_analysis.py | 200 ++++++++++++++++++ .../nni/analysis_utils/topology/__init__.py | 0 .../analysis_utils/topology/torch/__init__.py | 0 .../topology/torch/graph_from_trace.py | 150 +++++++++++++ .../topology/torch/shape_depedency.py | 95 +++++++++ .../analysis_utils/visualization/__init__.py | 0 .../visualization/torch/__init__.py | 0 10 files changed, 451 insertions(+) create mode 100644 src/sdk/pynni/nni/analysis_utils/__init__.py create mode 100644 src/sdk/pynni/nni/analysis_utils/sensitivity/__init__.py create mode 100644 src/sdk/pynni/nni/analysis_utils/sensitivity/torch/__init__.py create mode 100644 src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py create mode 100644 src/sdk/pynni/nni/analysis_utils/topology/__init__.py create mode 100644 src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py create mode 100644 src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py create mode 100644 src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py create mode 100644 src/sdk/pynni/nni/analysis_utils/visualization/__init__.py create mode 100644 src/sdk/pynni/nni/analysis_utils/visualization/torch/__init__.py diff --git a/src/sdk/pynni/nni/analysis_utils/__init__.py b/src/sdk/pynni/nni/analysis_utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/__init__.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/__init__.py new file mode 100644 index 0000000000..9a0454564d --- /dev/null +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/__init__.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/__init__.py new file mode 100644 index 0000000000..f8093828b6 --- /dev/null +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +from .sensitivity_analysis import SensitivityAnalysis diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py new file mode 100644 index 0000000000..2212306817 --- /dev/null +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -0,0 +1,200 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import torch +import copy +import json +import numpy as np +import torch.nn as nn +from collections import OrderedDict +from nni.compression.torch import LevelPruner +from nni.compression.torch import L1FilterPruner +from nni.compression.torch import L2FilterPruner + + +SUPPORTED_OP_NAME = ['Conv2d', 'Conv1d'] +SUPPORTED_OP_TYPE = [getattr(nn, name) for name in SUPPORTED_OP_NAME] + + +class SensitivityAnalysis: + def __init__(self, model, val_func, ratio_step=0.1): + # TODO Speedup by ratio_threshold or list + # TODO l1 or l2 seted here + """ + Perform sensitivity analysis for this model. + Parameters + ---------- + model: + the model to perform sensitivity analysis + val_func: + validation function for the model. Due to + different models may need different dataset/criterion + , therefore the user need to cover this part by themselves. + val_func take the model as the first input parameter, and + return the accuracy as output. + ratio_step: + the step to change the prune ratio during the analysis + """ + self.model = model + self.val_func = val_func + self.ratio_step = ratio_step + self.target_layer = OrderedDict() + self.ori_state_dict = copy.deepcopy(self.model.state_dict()) + self.target_layer = {} + self.sensitivities = {} + # already_pruned is for the iterative sensitivity analysis + # For example, sensitivity_pruner iteratively prune the target + # model according to the sensitivity. After each round of + # pruning, the sensitivity_pruner will test the new sensitivity + # for each layer + self.already_pruned = {} + self.model_parse() + + @property + def layers_count(self): + return len(self.target_layer) + + def model_parse(self): + for name, submodel in self.model.named_modules(): + for op_type in SUPPORTED_OP_TYPE: + if isinstance(submodel, op_type): + self.target_layer[name] = submodel + self.already_pruned[name] = 0 + + def analysis(self, start=0, end=None, type='l1'): + """ + This function analyze the sensitivity to pruning for + each conv layer in the target model. + If %start and %end are not set, we analyze all the conv + layers by default. Users can specify several layers to + analyze or parallelize the analysis process easily through + the %start and %end parameter. + + Parameters + ---------- + start: + Layer index of the sensitivity analysis start + end: + Layer index of the sensitivity analysis end + type: + Prune type of the Conv layers (l1/l2) + + Returns + ------- + sensitivities: + dict object that stores the trajectory of the + accuracy when the prune ratio changes + """ + if not end: + end = self.layers_count + assert start >= 0 and end <= self.layers_count + assert start <= end + namelist = list(self.target_layer.keys()) + for layerid in range(start, end): + name = namelist[layerid] + self.sensitivities[name] = {} + for prune_ratio in np.arange(self.ratio_step, 1.0, self.ratio_step): + print('PruneRatio: ', prune_ratio) + prune_ratio = np.round(prune_ratio, 2) + # Calculate the actual prune ratio based on the already pruned ratio + prune_ratio = ( + 1.0 - self.already_pruned[name]) * prune_ratio + self.already_pruned[name] + cfg = [{'sparsity': prune_ratio, 'op_names': [ + name], 'op_types': ['Conv2d']}] + pruner = L1FilterPruner(self.model, cfg) + pruner.compress() + val_acc = self.val_func(self.model) + self.sensitivities[name][prune_ratio] = val_acc + pruner._unwrap_model() + # TODO outside the ratio loop + # reset the weights pruned by the pruner + self.model.load_state_dict(self.ori_state_dict) + # print('Reset') + # print(self.val_func(self.model)) + del pruner + return self.sensitivities + + def visualization(self, outdir, merge=False): + """ + # + Visualize the sensitivity curves of the model + + Parameters + ---------- + outdir: + output directory of the image + merge: + if merge all the sensitivity curves into a + single image. If not, we will draw a picture + for each target layer of the model. + """ + os.makedirs(outdir, exist_ok=True) + import matplotlib + # use Agg backend + matplotlib.use('Agg') + import matplotlib.pyplot as plt + from matplotlib.lines import Line2D + LineStyles = [':', '-.', '--', '-'] + Markers = list(Line2D.markers.keys()) + if not merge: + # Draw the sensitivity curves for each layer first + for name in self.sensitivities: + X = list(self.sensitivities[name].keys()) + X = sorted(X) + Y = [self.sensitivities[name][x] for x in X] + plt.figure(figsize=(8, 4)) + plt.plot(X, Y, marker='*') + plt.xlabel('Prune Ratio') + plt.ylabel('Validation Accuracy') + plt.title(name) + plt.tight_layout() + filepath = os.path.join(outdir, '%s.jpg' % name) + plt.savefig(filepath) + plt.close() + else: + plt.figure() + styleid = 0 + for name in self.sensitivities: + X = list(self.sensitivities[name].keys()) + X = sorted(X) + Y = [self.sensitivities[name][x] for x in X] + linestyle = LineStyles[styleid % len(LineStyles)] + marker = Markers[styleid % len(Markers)] + plt.plot(X, Y, label=name, linestyle=linestyle, marker=marker) + plt.xlabel('Prune Ratio') + plt.ylabel('Validation Accuracy') + plt.legend(loc='center left', bbox_to_anchor=(1.02, 0.5)) + plt.tight_layout() + filepath = os.path.join(outdir, 'all.jpg') + plt.savefig(filepath, dpi=1000, bbox_inches='tight') + styleid += 1 + plt.close() + + def export(self, filepath): + """ + #TODO CSV + Export the results of the sensitivity analysis + to a json file. + + Parameters + ---------- + filepath: + Path of the output file + """ + # TODO csv + with open(filepath, 'w') as jf: + json.dump(self.sensitivities, jf, indent=4) + + def update_already_pruned(self, layername, ratio): + """ + Set the already pruned ratio for the target layer. + """ + self.already_pruned[layername] = ratio + + def load_state_dict(self, state_dict): + """ + Update the weight of the model + """ + self.ori_state_dict = copy.deepcopy(state_dict) + self.model.load_state_dict(self.ori_state_dict) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/__init__.py b/src/sdk/pynni/nni/analysis_utils/topology/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py new file mode 100644 index 0000000000..ba8d9366d9 --- /dev/null +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -0,0 +1,150 @@ +import re +import torch +import logging +import torch.nn as nn +import torch.jit as jit +from torch.utils.tensorboard._pytorch_graph import CLASSTYPE_KIND, GETATTR_KIND + +__all__ = ["PyNode", "Graph_Builder"] + +TUPLE_UNPACK = 'prim::TupleUnpack' + +logger = logging.getLogger('Graph_From_Trace') + +class PyNode: + def __init__(self, cnode, isValue=False): + self.cnode = cnode + self.isValue = isValue + self.isTensor = False + self.isOp = not self.isValue + if self.isValue: + if isinstance(self.cnode.type(), torch._C.TensorType): + self.isTensor = True + self.shape = self.cnode.type().sizes() + if self.isOp: + self.name = cnode.scopeName() + # remove the __module prefix + if self.name.startswith('__module.'): + self.name = self.name[len('__module.'):] + + def __str__(self): + if self.isTensor: + name = 'Tensor: {}'.format(self.shape) + elif self.isOp: + name = self.cnode.kind() + name = re.split('::', name)[1] + scope = self.cnode.scopeName() + scope = re.split('/', scope) + if len(scope) > 0: + name = scope[-1] + '\nType: ' + name + else: + name = str(self.cnode.type()) + return name + + def parents(self): + if self.isOp: + return list(self.cnode.inputs()) + else: + return [self.cnode.node()] + + +class Graph_Builder: + def __init__(self, model, data): + """ + input: + model: model to build the network architecture + data: input data for the model + We build the network architecture graph according the graph + in the scriptmodule. + """ + self.model = model + self.data = data + self.traced_model = jit.trace(model, data) + self.forward_edge = {} + self.graph = self.traced_model.graph + # Start from pytorch 1.4.0, we need this function to get more + # detail information + torch._C._jit_pass_inline(self.graph) + self.c2py = {} + self.visited = set() + self.build_graph() + self.unpack_tuple() + + def unpack_tuple(self): + """ + jit.trace also traces the tuple creation and unpack, which makes + the grapgh complex and difficult to understand. Therefore, we + """ + for node in self.graph.nodes(): + if node.kind() == TUPLE_UNPACK: + in_tuple = list(node.inputs())[0] + parent_node = in_tuple.node() + in_tensors = list(parent_node.inputs()) + out_tensors = list(node.outputs()) + assert len(in_tensors) == len(out_tensors) + for i in range(len(in_tensors)): + ori_edges = self.forward_edge[in_tensors[i]] + # remove the out edge to the Tuple_construct OP node + self.forward_edge[in_tensors[i]] = list( + filter(lambda x: x != parent_node, ori_edges)) + # Directly connect to the output nodes of the out_tensors + self.forward_edge[in_tensors[i]].extend( + self.forward_edge[out_tensors[i]]) + + def build_graph(self): + for node in self.graph.nodes(): + self.c2py[node] = PyNode(node) + for input in node.inputs(): + if input not in self.c2py: + self.c2py[input] = PyNode(input, True) + if input in self.forward_edge: + self.forward_edge[input].append(node) + else: + self.forward_edge[input] = [node] + for output in node.outputs(): + if output not in self.c2py: + self.c2py[output] = PyNode(output, True) + if node in self.forward_edge: + self.forward_edge[node].append(output) + else: + self.forward_edge[node] = [output] + + def visual_traverse(self, curnode, graph, lastnode): + """" + Input: + curnode: current visiting node(tensor/module) + graph: the handle of the Dgraph + lastnode: the last visited node + """ + if curnode in self.visited: + if lastnode is not None: + graph.edge(str(id(lastnode)), str(id(curnode))) + return + self.visited.add(curnode) + name = str(self.c2py[curnode]) + if self.c2py[curnode].isOp: + graph.node(str(id(curnode)), name, shape='ellipse', color='orange') + else: + graph.node(str(id(curnode)), name, shape='box', color='lightblue') + if lastnode is not None: + graph.edge(str(id(lastnode)), str(id(curnode))) + if curnode in self.forward_edge: + for _next in self.forward_edge[curnode]: + self.visual_traverse(_next, graph, curnode) + + def visualization(self, filename, format='jpg'): + """ + visualize the network architecture automaticlly. + Input: + filename: the filename of the saved image file + format: the output format + debug: if enable the debug mode + """ + import graphviz + graph = graphviz.Digraph(format=format) + self.visited.clear() + for input in self.graph.inputs(): + if input.type().kind() == CLASSTYPE_KIND: + continue + self.visual_traverse(input, graph, None) + graph.render(filename) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py new file mode 100644 index 0000000000..fc32767104 --- /dev/null +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py @@ -0,0 +1,95 @@ +import torch +import queue +import logging +import torch.nn as nn + +from .graph_from_trace import * + + +CONV_TYPE = 'aten::_convolution' + +logger = logging.getLogger('Shape_Depedency') + + +class Prune_Check: + def __init__(self, model, data): + """ + Input: + model: model to be pruned + data: example input data to trace the network architecture + """ + self.graph_builder = Graph_Builder(model, data) + self.cnodes = list(self.graph_builder.graph.nodes()) + self.graph = self.graph_builder.graph + self.forward_edge = self.graph_builder.forward_edge + self.c2py = self.graph_builder.c2py + self.depedency = {} + self.build_channel_depedency() + self.visited = set() + + def get_parent_convs(self, node): + parent_convs = [] + queue = [] + queue.append(node) + while len(queue) > 0: + curnode = queue.pop(0) + print(curnode) + print() + if curnode in self.c2py and self.c2py[curnode].isOp \ + and curnode.kind() == CONV_TYPE: + # find the first met conv + parent_convs.append(curnode) + continue + parents = self.c2py[curnode].parents() + for parent in parents: + if parent in self.c2py and (self.c2py[parent].isOp or 'Tensor' in str(parent.type())): + # filter the scalar parameters of the functions + # only consider the Tensors/ List(Tensor) + queue.append(parent) + return parent_convs + + def build_channel_depedency(self): + for node in self.cnodes: + parent_convs = [] + if 'add' in node.kind(): + parent_convs = self.get_parent_convs(node) + if 'cat' in node.kind(): + cat_dim = list(node.inputs())[1].toIValue() + # N * C * H * W + if cat_dim != 1: + parent_convs = self.get_parent_convs(node) + depedency_set = set(parent_convs) + # merge the depedencies + for node in parent_convs: + if node in self.depedency: + depedency_set.update(self.depedency[node]) + # save the depedencies + for node in depedency_set: + self.depedency[node] = depedency_set + + def filter_prune_check(self, ratios): + """ + input: + ratios: the prune ratios for the layers + ratios is the dict, in which the keys are + the names of the target layer and the values + are the prune ratio for the corresponding layers + For example: + ratios = {'body.conv1': 0.5, 'body.conv2':0.5} + Note: the name of the layers should looks like + the names that model.named_modules() functions + returns. + """ + + for node in self.cnodes: + if node.kind() == CONV_TYPE and self.c2py[node].name in ratios: + if node not in self.depedency: + # this layer has no depedency on other layers + # it's legal to set any prune ratio between 0 and 1 + continue + for other in self.depedency[node]: + if self.c2py[other].name not in ratios: + return False + elif ratios[self.c2py[node].name] != ratios[self.c2py[other].name]: + return False + return True diff --git a/src/sdk/pynni/nni/analysis_utils/visualization/__init__.py b/src/sdk/pynni/nni/analysis_utils/visualization/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/sdk/pynni/nni/analysis_utils/visualization/torch/__init__.py b/src/sdk/pynni/nni/analysis_utils/visualization/torch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From 712d982d0c500c3d35e69305c9f766cc8fd9317b Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 14 May 2020 05:25:38 +0000 Subject: [PATCH 02/47] Reformat the code and add several small new features. Signed-off-by: Ningxin --- .../sensitivity/torch/sensitivity_analysis.py | 2 +- .../topology/torch/graph_from_trace.py | 78 ++++++++++----- .../topology/torch/shape_depedency.py | 97 +++++++++++++++---- 3 files changed, 133 insertions(+), 44 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py index 2212306817..0e901db39b 100644 --- a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -19,7 +19,7 @@ class SensitivityAnalysis: def __init__(self, model, val_func, ratio_step=0.1): - # TODO Speedup by ratio_threshold or list + # TODO Speedup by ratio_threshold or list # TODO l1 or l2 seted here """ Perform sensitivity analysis for this model. diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index ba8d9366d9..8da7663053 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -1,3 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + import re import torch import logging @@ -5,7 +8,7 @@ import torch.jit as jit from torch.utils.tensorboard._pytorch_graph import CLASSTYPE_KIND, GETATTR_KIND -__all__ = ["PyNode", "Graph_Builder"] +__all__ = ["PyNode", "GraphBuilder"] TUPLE_UNPACK = 'prim::TupleUnpack' @@ -22,24 +25,24 @@ def __init__(self, cnode, isValue=False): self.isTensor = True self.shape = self.cnode.type().sizes() if self.isOp: - self.name = cnode.scopeName() + scopename = cnode.scopeName() + scopename = re.split('/', scopename) + # note, the scopeName of node may be empty + self.name = scopename[-1] if len(scopename) > 0 else '' # remove the __module prefix if self.name.startswith('__module.'): self.name = self.name[len('__module.'):] def __str__(self): if self.isTensor: - name = 'Tensor: {}'.format(self.shape) + _str = 'Tensor: {}'.format(self.shape) elif self.isOp: - name = self.cnode.kind() - name = re.split('::', name)[1] - scope = self.cnode.scopeName() - scope = re.split('/', scope) - if len(scope) > 0: - name = scope[-1] + '\nType: ' + name + op_type = self.cnode.kind() + op_type = re.split('::', op_type)[1] + _str = self.name + '\nType: ' + op_type else: - name = str(self.cnode.type()) - return name + _str = str(self.cnode.type()) + return _str def parents(self): if self.isOp: @@ -48,14 +51,23 @@ def parents(self): return [self.cnode.node()] -class Graph_Builder: +class GraphBuilder: def __init__(self, model, data): """ - input: - model: model to build the network architecture - data: input data for the model - We build the network architecture graph according the graph - in the scriptmodule. + We build the network architecture graph according the graph + in the scriptmodule. However, the original graph from jit.trace + has lots of detailed information which make the graph complicated + and hard to understand. So we also store a copy of the network + architecture in the self.forward_edge. We will simplify the network + architecure (such as unpack_tuple, etc) stored in self.forward_edge + to make the graph more clear. + Parameters + ---------- + model: + The model to build the network architecture. + data: + The sample input data for the model. + """ self.model = model self.data = data @@ -74,6 +86,7 @@ def unpack_tuple(self): """ jit.trace also traces the tuple creation and unpack, which makes the grapgh complex and difficult to understand. Therefore, we + unpack the tuple handly to make the graph clear. """ for node in self.graph.nodes(): if node.kind() == TUPLE_UNPACK: @@ -92,6 +105,10 @@ def unpack_tuple(self): self.forward_edge[out_tensors[i]]) def build_graph(self): + """ + Copy the architecture information from the traced_model into + forward_edge. + """ for node in self.graph.nodes(): self.c2py[node] = PyNode(node) for input in node.inputs(): @@ -111,10 +128,16 @@ def build_graph(self): def visual_traverse(self, curnode, graph, lastnode): """" - Input: - curnode: current visiting node(tensor/module) - graph: the handle of the Dgraph - lastnode: the last visited node + Traverse the network and draw the nodes and edges + at the same time. + Parameters + ---------- + curnode: + Current visiting node(tensor/module). + graph: + The handle of the Dgraph. + lastnode: + The last visited node. """ if curnode in self.visited: if lastnode is not None: @@ -135,11 +158,16 @@ def visual_traverse(self, curnode, graph, lastnode): def visualization(self, filename, format='jpg'): """ visualize the network architecture automaticlly. - Input: - filename: the filename of the saved image file - format: the output format - debug: if enable the debug mode + Parameters + ---------- + filename: + The filename of the saved image file. + format: + The output format. """ + # TODO and detailed mode for the visualization function + # in which the graph will also contain all the weights/bias + # information. import graphviz graph = graphviz.Digraph(format=format) self.visited.clear() diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py index fc32767104..ccaf3d7893 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py @@ -1,3 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import csv import torch import queue import logging @@ -11,30 +15,47 @@ logger = logging.getLogger('Shape_Depedency') -class Prune_Check: +class ChannelDepedency: def __init__(self, model, data): """ - Input: - model: model to be pruned - data: example input data to trace the network architecture + This model analyze the channel depedencis between the conv + layers in a model. + + Parameters + ---------- + model: + The model to be analyzed. + data: + The example input data to trace the network architecture. """ - self.graph_builder = Graph_Builder(model, data) + self.graph_builder = GraphBuilder(model, data) self.cnodes = list(self.graph_builder.graph.nodes()) self.graph = self.graph_builder.graph self.forward_edge = self.graph_builder.forward_edge self.c2py = self.graph_builder.c2py self.depedency = {} self.build_channel_depedency() - self.visited = set() + def get_parent_convs(self, node): + """ + Find the nearest father conv layers for the target node. + + Parameters + --------- + node: + target node. + + Returns + ------- + parent_convs: + nearest father conv layers for the target worknode. + """ parent_convs = [] queue = [] queue.append(node) while len(queue) > 0: curnode = queue.pop(0) - print(curnode) - print() if curnode in self.c2py and self.c2py[curnode].isOp \ and curnode.kind() == CONV_TYPE: # find the first met conv @@ -49,6 +70,10 @@ def get_parent_convs(self, node): return parent_convs def build_channel_depedency(self): + """ + Build the channel depedency for the conv layers + in the model. + """ for node in self.cnodes: parent_convs = [] if 'add' in node.kind(): @@ -69,16 +94,25 @@ def build_channel_depedency(self): def filter_prune_check(self, ratios): """ - input: - ratios: the prune ratios for the layers - ratios is the dict, in which the keys are - the names of the target layer and the values - are the prune ratio for the corresponding layers - For example: - ratios = {'body.conv1': 0.5, 'body.conv2':0.5} - Note: the name of the layers should looks like - the names that model.named_modules() functions - returns. + According to the channel depedencies between the conv + layers, check if the filter pruning ratio for the conv + layers is legal. + + Parameters + --------- + ratios: + the prune ratios for the layers. %ratios is a dict, + in which the keys are the names of the target layer + and the values are the prune ratio for the corresponding + layers. For example: + ratios = {'body.conv1': 0.5, 'body.conv2':0.5} + Note: the name of the layers should looks like + the names that model.named_modules() functions + returns. + + Returns + ------- + True/False """ for node in self.cnodes: @@ -93,3 +127,30 @@ def filter_prune_check(self, ratios): elif ratios[self.c2py[node].name] != ratios[self.c2py[other].name]: return False return True + + def export(self, filepath): + """ + export the channel depedencies as a csv file. + """ + header = ['Depedency Set', 'Convolutional Layers'] + setid = 0 + visited = set() + with open(filepath, 'w') as csvf: + csv_w = csv.writer(csvf, delimiter=',') + csv_w.writerow(header) + for node in self.cnodes: + if node.kind() != CONV_TYPE or node in visited: + continue + setid += 1 + row = ['Set %d' % setid] + if node not in self.depedency: + visited.add(node) + row.append(self.c2py[node].name) + else: + for other in self.depedency[node]: + visited.add(other) + row.append(self.c2py[other].name) + csv_w.writerow(row) + + + From 202593cb788a6d06673973514418525e659724ff Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 14 May 2020 11:20:59 +0000 Subject: [PATCH 03/47] Add the flops information rendering for the visulization. Signed-off-by: Ningxin --- .../topology/torch/graph_from_trace.py | 83 +++++++++++++++++-- 1 file changed, 76 insertions(+), 7 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 8da7663053..a211b0078d 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -2,6 +2,8 @@ # Licensed under the MIT license. import re +import os +import csv import torch import logging import torch.nn as nn @@ -14,6 +16,7 @@ logger = logging.getLogger('Graph_From_Trace') + class PyNode: def __init__(self, cnode, isValue=False): self.cnode = cnode @@ -126,7 +129,7 @@ def build_graph(self): else: self.forward_edge[node] = [output] - def visual_traverse(self, curnode, graph, lastnode): + def visual_traverse(self, curnode, graph, lastnode, cfg): """" Traverse the network and draw the nodes and edges at the same time. @@ -138,24 +141,36 @@ def visual_traverse(self, curnode, graph, lastnode): The handle of the Dgraph. lastnode: The last visited node. + cfg: + Dict object to specify the rendering + configuration for operation node. + key is the name of the operation, + value is a also a dict. For example, + {'conv1': {'shape':'box', 'color':'red'}} """ if curnode in self.visited: if lastnode is not None: graph.edge(str(id(lastnode)), str(id(curnode))) return self.visited.add(curnode) - name = str(self.c2py[curnode]) + tmp_str = str(self.c2py[curnode]) if self.c2py[curnode].isOp: - graph.node(str(id(curnode)), name, shape='ellipse', color='orange') + name = self.c2py[curnode].name + # default render configuration + render_cfg = {'shape': 'ellipse', 'style': 'solid'} + if name in cfg: + render_cfg = cfg[name] + graph.node(str(id(curnode)), tmp_str, **render_cfg) else: - graph.node(str(id(curnode)), name, shape='box', color='lightblue') + graph.node(str(id(curnode)), tmp_str, shape='box', + color='lightblue', style='dashed') if lastnode is not None: graph.edge(str(id(lastnode)), str(id(curnode))) if curnode in self.forward_edge: for _next in self.forward_edge[curnode]: - self.visual_traverse(_next, graph, curnode) + self.visual_traverse(_next, graph, curnode, cfg) - def visualization(self, filename, format='jpg'): + def base_visualization(self, filename, format='jpg', cfg=None): """ visualize the network architecture automaticlly. Parameters @@ -168,11 +183,65 @@ def visualization(self, filename, format='jpg'): # TODO and detailed mode for the visualization function # in which the graph will also contain all the weights/bias # information. + if not cfg: + cfg = {} import graphviz graph = graphviz.Digraph(format=format) self.visited.clear() for input in self.graph.inputs(): if input.type().kind() == CLASSTYPE_KIND: continue - self.visual_traverse(input, graph, None) + self.visual_traverse(input, graph, None, cfg) graph.render(filename) + + def visualize_with_flops(self, filepath, format, flops_file): + assert os.path.exists(flops_file) + f_handle = open(flops_file, 'r') + csv_r = csv.reader(f_handle) + flops = {} + # skip the header of the csv file + header = next(csv_r) + for row in csv_r: + if(len(row) == 2): + layername = row[0] + _flops = float(row[1]) + flops[layername] = _flops + + f_handle.close() + # Divide the flops of the layers into 11 levels + # We use the 'rdylgn11 color scheme' to present + # the number of the flops, in which we have 11 colors + # range from green to red. + _min_flops = min(flops.values()) + _max_flops = max(flops.values()) + color_scheme_count = 9 + flops_step = (_max_flops - _min_flops) / (color_scheme_count-1) + + cfgs = {} + for layername in flops: + flops_level = (flops[layername] - _min_flops) / flops_step + # flops_level = color_scheme_count - int(round(flops_level)) + flops_level = int(round(flops_level)) + 1 + render_cfg = render_cfg = {'shape': 'ellipse', + 'fillcolor': "/reds9/"+str(flops_level), 'style': 'filled'} + cfgs[layername] = render_cfg + self.base_visualization(filepath, format=format, cfg=cfgs) + + def visualize_with_depedency(self, filepath, format, depedency_file): + assert os.path.exists(depedency_file) + + def visualize_with_sensitivity(self, filepath, format, sensitivity_file): + assert os.path.exists(sensitivity_file) + + def visualization(self, filename, format='jpg', + flops_file=None, + sensitivity_file=None, + depedency_file=None): + + # First, visualize the network architecture only + self.base_visualization(filename, format=format) + # if the flops file is specified, we also render + # a image with the flops information. + if flops_file is not None: + flops_img = filename + '_flops' + self.visualize_with_flops(flops_img, format, flops_file) From 8a7a7991682272ae9a23276a353331f95797de16 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 14 May 2020 13:30:46 +0000 Subject: [PATCH 04/47] Add the depedency rendering feature. Signed-off-by: Ningxin --- .../topology/torch/graph_from_trace.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index a211b0078d..6d7584338f 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -229,6 +229,33 @@ def visualize_with_flops(self, filepath, format, flops_file): def visualize_with_depedency(self, filepath, format, depedency_file): assert os.path.exists(depedency_file) + f_handle = open(depedency_file, 'r') + csv_r = csv.reader(f_handle) + # skip the header of the csv file + header = next(csv_r) + depedency_sets = [] + for row in csv_r: + tmp_set = set() + for i in range(1, len(row)): + tmp_set.add(row[i]) + depedency_sets.append(tmp_set) + f_handle.close() + # Create the render configs, assign the same color for the + # same depedency set + cfgs = {} + colorid = 0 + for tmp_set in depedency_sets: + if len(tmp_set) == 1: + # This layer has no depedency + continue + colorid = (colorid + 1) % 12 + str_color = "/paired12/%d" % (colorid + 1) + for layername in tmp_set: + render_cfg = {'shape': 'ellipse', + 'fillcolor': str_color, 'style': 'filled'} + cfgs[layername] = render_cfg + self.base_visualization(filepath, format=format, cfg=cfgs) + def visualize_with_sensitivity(self, filepath, format, sensitivity_file): assert os.path.exists(sensitivity_file) @@ -245,3 +272,8 @@ def visualization(self, filename, format='jpg', if flops_file is not None: flops_img = filename + '_flops' self.visualize_with_flops(flops_img, format, flops_file) + + if depedency_file is not None: + depedency_img = filename + '_depedency' + self.visualize_with_depedency( + depedency_img, format, depedency_file) From 362441a3ad91bc06e6c7935ba98a3591c97c4b46 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 15 May 2020 02:11:22 +0000 Subject: [PATCH 05/47] Update the interface of the SensitivityAnalysis Signed-off-by: Ningxin --- .../sensitivity/torch/sensitivity_analysis.py | 95 +++++++++++++------ .../topology/torch/graph_from_trace.py | 6 ++ 2 files changed, 74 insertions(+), 27 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py index 0e901db39b..6d1027355d 100644 --- a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -4,7 +4,8 @@ import os import torch import copy -import json +import csv +import logging import numpy as np import torch.nn as nn from collections import OrderedDict @@ -16,9 +17,13 @@ SUPPORTED_OP_NAME = ['Conv2d', 'Conv1d'] SUPPORTED_OP_TYPE = [getattr(nn, name) for name in SUPPORTED_OP_NAME] +logger = logging.getLogger('Sensitivity_Analysis') +logger.setLevel(logging.INFO) + + class SensitivityAnalysis: - def __init__(self, model, val_func, ratio_step=0.1): + def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop=None): # TODO Speedup by ratio_threshold or list # TODO l1 or l2 seted here """ @@ -33,16 +38,35 @@ def __init__(self, model, val_func, ratio_step=0.1): , therefore the user need to cover this part by themselves. val_func take the model as the first input parameter, and return the accuracy as output. - ratio_step: - the step to change the prune ratio during the analysis + sparsities: + The sparsity list provided by users. + prune_type: + The pruner type used to prune the conv layers, default is 'l1', + and 'l2', 'fine-grained' is also supported. + early_stop: + If this flag is set, the sensitivity analysis + for a conv layer will early stop when the accuracy + drop already reach the value of early_stop (0.05 for example). + """ self.model = model self.val_func = val_func - self.ratio_step = ratio_step self.target_layer = OrderedDict() self.ori_state_dict = copy.deepcopy(self.model.state_dict()) self.target_layer = {} self.sensitivities = {} + if sparsities is not None: + self.sparsities = sorted(sparsities) + else: + self.sparsities = np.arange(0.1, 1.0, 0.1) + self.sparsities = [np.round(x, 2) for x in self.sparsities] + self.Pruner = L1FilterPruner + if prune_type == 'l2': + self.Pruner = L2FilterPruner + elif prune_type == 'fine-grained': + self.Pruner = LevelPruner + self.early_stop = early_stop + self.ori_acc = None # original accuracy for the model # already_pruned is for the iterative sensitivity analysis # For example, sensitivity_pruner iteratively prune the target # model according to the sensitivity. After each round of @@ -62,7 +86,7 @@ def model_parse(self): self.target_layer[name] = submodel self.already_pruned[name] = 0 - def analysis(self, start=0, end=None, type='l1'): + def analysis(self, start=0, end=None): """ This function analyze the sensitivity to pruning for each conv layer in the target model. @@ -77,8 +101,6 @@ def analysis(self, start=0, end=None, type='l1'): Layer index of the sensitivity analysis start end: Layer index of the sensitivity analysis end - type: - Prune type of the Conv layers (l1/l2) Returns ------- @@ -90,29 +112,41 @@ def analysis(self, start=0, end=None, type='l1'): end = self.layers_count assert start >= 0 and end <= self.layers_count assert start <= end + if self.ori_acc is None: + self.ori_acc = self.val_func(self.model) namelist = list(self.target_layer.keys()) for layerid in range(start, end): name = namelist[layerid] - self.sensitivities[name] = {} - for prune_ratio in np.arange(self.ratio_step, 1.0, self.ratio_step): - print('PruneRatio: ', prune_ratio) - prune_ratio = np.round(prune_ratio, 2) + self.sensitivities[name] = { 0.0 : self.ori_acc} + for sparsity in self.sparsities: # Calculate the actual prune ratio based on the already pruned ratio - prune_ratio = ( - 1.0 - self.already_pruned[name]) * prune_ratio + self.already_pruned[name] - cfg = [{'sparsity': prune_ratio, 'op_names': [ + sparsity = ( + 1.0 - self.already_pruned[name]) * sparsity + self.already_pruned[name] + # TODO In current L1/L2 Filter Pruner, the 'op_types' is still necessary + # I think the L1/L2 Pruner should specify the op_types automaticlly + # according to the op_names + cfg = [{'sparsity': sparsity, 'op_names': [ name], 'op_types': ['Conv2d']}] - pruner = L1FilterPruner(self.model, cfg) + pruner = self.Pruner(self.model, cfg) pruner.compress() val_acc = self.val_func(self.model) - self.sensitivities[name][prune_ratio] = val_acc + logger.info('Layer: %s Sparsity: %.2f Accuracy: %.4f' % + (name, sparsity, val_acc)) + + self.sensitivities[name][sparsity] = val_acc pruner._unwrap_model() - # TODO outside the ratio loop - # reset the weights pruned by the pruner - self.model.load_state_dict(self.ori_state_dict) - # print('Reset') - # print(self.val_func(self.model)) del pruner + # if the accuracy drop already reach the 'early_stop' + if self.early_stop is not None: + if val_acc + self.early_stop < self.ori_acc: + break + + # reset the weights pruned by the pruner, because + # out sparsities is sorted, so we donnot need to reset + # weight of the layer when the sparsity changes, instead, + # we only need reset the weight when the pruning layer changes. + self.model.load_state_dict(self.ori_state_dict) + return self.sensitivities def visualization(self, outdir, merge=False): @@ -173,18 +207,25 @@ def visualization(self, outdir, merge=False): def export(self, filepath): """ - #TODO CSV Export the results of the sensitivity analysis - to a json file. + to a csv file. Parameters ---------- filepath: Path of the output file """ - # TODO csv - with open(filepath, 'w') as jf: - json.dump(self.sensitivities, jf, indent=4) + str_sparsities = [str(x) for x in self.sparsities] + header = ['layername'] + str_sparsities + with open(filepath, 'w') as csvf: + csv_w = csv.writer(csvf) + csv_w.writerow(header) + for layername in self.sensitivities: + row = [] + row.append(layername) + for sparsity in sorted(self.sensitivities[layername].keys()): + row.append(self.sensitivities[layername][sparsity]) + csv_w.writerow(row) def update_already_pruned(self, layername, ratio): """ diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 6d7584338f..2fd6d4618f 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -259,6 +259,12 @@ def visualize_with_depedency(self, filepath, format, depedency_file): def visualize_with_sensitivity(self, filepath, format, sensitivity_file): assert os.path.exists(sensitivity_file) + f_handle = open(sensitivity_file, 'r') + csv_r = csv.reader(f_handle) + header = next(csv_r) + sparsities = [float(x) for x in header[1:]] + + f_handle.close() def visualization(self, filename, format='jpg', flops_file=None, From e69e78fa719f03b76db96ba6e5c5cfa7653fcc42 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 15 May 2020 05:21:00 +0000 Subject: [PATCH 06/47] Add sensitivity rendering feature. Signed-off-by: Ningxin --- .../sensitivity/torch/sensitivity_analysis.py | 31 ++++++++++------- .../topology/torch/graph_from_trace.py | 33 +++++++++++++++++-- 2 files changed, 51 insertions(+), 13 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py index 6d1027355d..8a13c30f8b 100644 --- a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -21,11 +21,8 @@ logger.setLevel(logging.INFO) - class SensitivityAnalysis: - def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop=None): - # TODO Speedup by ratio_threshold or list - # TODO l1 or l2 seted here + def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop=1.0): """ Perform sensitivity analysis for this model. Parameters @@ -47,6 +44,8 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop If this flag is set, the sensitivity analysis for a conv layer will early stop when the accuracy drop already reach the value of early_stop (0.05 for example). + The default value is 1.0, which means the analysis won't stop + until all given sparsities are tested. """ self.model = model @@ -86,7 +85,7 @@ def model_parse(self): self.target_layer[name] = submodel self.already_pruned[name] = 0 - def analysis(self, start=0, end=None): + def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): """ This function analyze the sensitivity to pruning for each conv layer in the target model. @@ -101,6 +100,12 @@ def analysis(self, start=0, end=None): Layer index of the sensitivity analysis start end: Layer index of the sensitivity analysis end + val_args: + args for the val_function + val_kwargs: + kwargs for the val_funtion + The val_funtion will be called as: + val_function(*val_args, **val_kwargs) Returns ------- @@ -112,12 +117,17 @@ def analysis(self, start=0, end=None): end = self.layers_count assert start >= 0 and end <= self.layers_count assert start <= end + if val_args is None: + val_args = [] + if val_kwargs is None: + val_kwargs = {} + # Get the validation accuracy before pruning if self.ori_acc is None: - self.ori_acc = self.val_func(self.model) + self.ori_acc = self.val_func(*val_args, **val_kwargs) namelist = list(self.target_layer.keys()) for layerid in range(start, end): name = namelist[layerid] - self.sensitivities[name] = { 0.0 : self.ori_acc} + self.sensitivities[name] = {0.0: self.ori_acc} for sparsity in self.sparsities: # Calculate the actual prune ratio based on the already pruned ratio sparsity = ( @@ -129,7 +139,7 @@ def analysis(self, start=0, end=None): name], 'op_types': ['Conv2d']}] pruner = self.Pruner(self.model, cfg) pruner.compress() - val_acc = self.val_func(self.model) + val_acc = self.val_func(*val_args, **val_kwargs) logger.info('Layer: %s Sparsity: %.2f Accuracy: %.4f' % (name, sparsity, val_acc)) @@ -137,9 +147,8 @@ def analysis(self, start=0, end=None): pruner._unwrap_model() del pruner # if the accuracy drop already reach the 'early_stop' - if self.early_stop is not None: - if val_acc + self.early_stop < self.ori_acc: - break + if val_acc + self.early_stop < self.ori_acc: + break # reset the weights pruned by the pruner, because # out sparsities is sorted, so we donnot need to reset diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 2fd6d4618f..7e7ae0421c 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -256,15 +256,39 @@ def visualize_with_depedency(self, filepath, format, depedency_file): cfgs[layername] = render_cfg self.base_visualization(filepath, format=format, cfg=cfgs) - def visualize_with_sensitivity(self, filepath, format, sensitivity_file): assert os.path.exists(sensitivity_file) f_handle = open(sensitivity_file, 'r') csv_r = csv.reader(f_handle) header = next(csv_r) + # sparsities is ordered in sensitivity analysis sparsities = [float(x) for x in header[1:]] - + sensitivity = {} + for row in csv_r: + layername = row[0] + accs = [float(_acc) for _acc in row[1:]] + sensitivity[layername] = accs f_handle.close() + # Note: Due to the early stop in SensitivityAnalysis, the number of + # accuracies of different sparsities may be different. The earlier + # the layers stops, the higher the sensitivity is. + cfgs = {} + color_scheme_count = 9 + for layername in sensitivity: + _max = sparsities[len(sensitivity[layername]) - 1] + _max_all = max(sparsities) + level = 1.0 - (_max / _max_all) # [0, 1] + level = int(color_scheme_count * level) # [0, 9] + print(layername, level) + print(sensitivity[layername]) + # color number start from 1 + if level == 0: + level = 1 + str_color = "/reds9/%d" % level + render_cfg = {'shape': 'ellipse', + 'fillcolor': str_color, 'style': 'filled'} + cfgs[layername] = render_cfg + self.base_visualization(filepath, format=format, cfg=cfgs) def visualization(self, filename, format='jpg', flops_file=None, @@ -283,3 +307,8 @@ def visualization(self, filename, format='jpg', depedency_img = filename + '_depedency' self.visualize_with_depedency( depedency_img, format, depedency_file) + + if sensitivity_file is not None: + sensitivity_img = filename + '_sensitivity' + self.visualize_with_sensitivity( + sensitivity_img, format, sensitivity_file) From 582327603e35eb075870e0eb016a1fcde9c219e0 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 15 May 2020 05:25:31 +0000 Subject: [PATCH 07/47] Add copyright and license. Signed-off-by: Ningxin --- src/sdk/pynni/nni/analysis_utils/__init__.py | 2 ++ src/sdk/pynni/nni/analysis_utils/topology/__init__.py | 2 ++ src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py | 2 ++ 3 files changed, 6 insertions(+) diff --git a/src/sdk/pynni/nni/analysis_utils/__init__.py b/src/sdk/pynni/nni/analysis_utils/__init__.py index e69de29bb2..0eca6426d9 100644 --- a/src/sdk/pynni/nni/analysis_utils/__init__.py +++ b/src/sdk/pynni/nni/analysis_utils/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. \ No newline at end of file diff --git a/src/sdk/pynni/nni/analysis_utils/topology/__init__.py b/src/sdk/pynni/nni/analysis_utils/topology/__init__.py index e69de29bb2..0eca6426d9 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/__init__.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. \ No newline at end of file diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py index e69de29bb2..0eca6426d9 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. \ No newline at end of file From 4a70d799ea393fa34b43efa9f23cf8b833867f7f Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 15 May 2020 05:28:37 +0000 Subject: [PATCH 08/47] Remove the unrelated files. Signed-off-by: Ningxin --- src/sdk/pynni/nni/analysis_utils/visualization/__init__.py | 0 src/sdk/pynni/nni/analysis_utils/visualization/torch/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 src/sdk/pynni/nni/analysis_utils/visualization/__init__.py delete mode 100644 src/sdk/pynni/nni/analysis_utils/visualization/torch/__init__.py diff --git a/src/sdk/pynni/nni/analysis_utils/visualization/__init__.py b/src/sdk/pynni/nni/analysis_utils/visualization/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/sdk/pynni/nni/analysis_utils/visualization/torch/__init__.py b/src/sdk/pynni/nni/analysis_utils/visualization/torch/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 From fc95dd7dc5bb63cfc67e59c8f1a295e1d9ee6256 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 15 May 2020 06:14:09 +0000 Subject: [PATCH 09/47] Fix some typos. Signed-off-by: Ningxin --- .../topology/torch/graph_from_trace.py | 28 ++++++------ ...shape_depedency.py => shape_dependency.py} | 44 +++++++++---------- 2 files changed, 35 insertions(+), 37 deletions(-) rename src/sdk/pynni/nni/analysis_utils/topology/torch/{shape_depedency.py => shape_dependency.py} (79%) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 7e7ae0421c..46454d6d23 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -227,26 +227,26 @@ def visualize_with_flops(self, filepath, format, flops_file): cfgs[layername] = render_cfg self.base_visualization(filepath, format=format, cfg=cfgs) - def visualize_with_depedency(self, filepath, format, depedency_file): - assert os.path.exists(depedency_file) - f_handle = open(depedency_file, 'r') + def visualize_with_dependency(self, filepath, format, dependency_file): + assert os.path.exists(dependency_file) + f_handle = open(dependency_file, 'r') csv_r = csv.reader(f_handle) # skip the header of the csv file header = next(csv_r) - depedency_sets = [] + dependency_sets = [] for row in csv_r: tmp_set = set() for i in range(1, len(row)): tmp_set.add(row[i]) - depedency_sets.append(tmp_set) + dependency_sets.append(tmp_set) f_handle.close() # Create the render configs, assign the same color for the - # same depedency set + # same dependency set cfgs = {} colorid = 0 - for tmp_set in depedency_sets: + for tmp_set in dependency_sets: if len(tmp_set) == 1: - # This layer has no depedency + # This layer has no dependency continue colorid = (colorid + 1) % 12 str_color = "/paired12/%d" % (colorid + 1) @@ -279,8 +279,6 @@ def visualize_with_sensitivity(self, filepath, format, sensitivity_file): _max_all = max(sparsities) level = 1.0 - (_max / _max_all) # [0, 1] level = int(color_scheme_count * level) # [0, 9] - print(layername, level) - print(sensitivity[layername]) # color number start from 1 if level == 0: level = 1 @@ -293,7 +291,7 @@ def visualize_with_sensitivity(self, filepath, format, sensitivity_file): def visualization(self, filename, format='jpg', flops_file=None, sensitivity_file=None, - depedency_file=None): + dependency_file=None): # First, visualize the network architecture only self.base_visualization(filename, format=format) @@ -303,10 +301,10 @@ def visualization(self, filename, format='jpg', flops_img = filename + '_flops' self.visualize_with_flops(flops_img, format, flops_file) - if depedency_file is not None: - depedency_img = filename + '_depedency' - self.visualize_with_depedency( - depedency_img, format, depedency_file) + if dependency_file is not None: + dependency_img = filename + '_dependency' + self.visualize_with_dependency( + dependency_img, format, dependency_file) if sensitivity_file is not None: sensitivity_img = filename + '_sensitivity' diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py similarity index 79% rename from src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py rename to src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py index ccaf3d7893..9634687e66 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_depedency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py @@ -12,13 +12,13 @@ CONV_TYPE = 'aten::_convolution' -logger = logging.getLogger('Shape_Depedency') +logger = logging.getLogger('Shape_Dependency') -class ChannelDepedency: +class ChannelDependency: def __init__(self, model, data): """ - This model analyze the channel depedencis between the conv + This model analyze the channel dependencis between the conv layers in a model. Parameters @@ -33,8 +33,8 @@ def __init__(self, model, data): self.graph = self.graph_builder.graph self.forward_edge = self.graph_builder.forward_edge self.c2py = self.graph_builder.c2py - self.depedency = {} - self.build_channel_depedency() + self.dependency = {} + self.build_channel_dependency() def get_parent_convs(self, node): @@ -69,9 +69,9 @@ def get_parent_convs(self, node): queue.append(parent) return parent_convs - def build_channel_depedency(self): + def build_channel_dependency(self): """ - Build the channel depedency for the conv layers + Build the channel dependency for the conv layers in the model. """ for node in self.cnodes: @@ -83,18 +83,18 @@ def build_channel_depedency(self): # N * C * H * W if cat_dim != 1: parent_convs = self.get_parent_convs(node) - depedency_set = set(parent_convs) - # merge the depedencies + dependency_set = set(parent_convs) + # merge the dependencies for node in parent_convs: - if node in self.depedency: - depedency_set.update(self.depedency[node]) - # save the depedencies - for node in depedency_set: - self.depedency[node] = depedency_set + if node in self.dependency: + dependency_set.update(self.dependency[node]) + # save the dependencies + for node in dependency_set: + self.dependency[node] = dependency_set def filter_prune_check(self, ratios): """ - According to the channel depedencies between the conv + According to the channel dependencies between the conv layers, check if the filter pruning ratio for the conv layers is legal. @@ -117,11 +117,11 @@ def filter_prune_check(self, ratios): for node in self.cnodes: if node.kind() == CONV_TYPE and self.c2py[node].name in ratios: - if node not in self.depedency: - # this layer has no depedency on other layers + if node not in self.dependency: + # this layer has no dependency on other layers # it's legal to set any prune ratio between 0 and 1 continue - for other in self.depedency[node]: + for other in self.dependency[node]: if self.c2py[other].name not in ratios: return False elif ratios[self.c2py[node].name] != ratios[self.c2py[other].name]: @@ -130,9 +130,9 @@ def filter_prune_check(self, ratios): def export(self, filepath): """ - export the channel depedencies as a csv file. + export the channel dependencies as a csv file. """ - header = ['Depedency Set', 'Convolutional Layers'] + header = ['Dependency Set', 'Convolutional Layers'] setid = 0 visited = set() with open(filepath, 'w') as csvf: @@ -143,11 +143,11 @@ def export(self, filepath): continue setid += 1 row = ['Set %d' % setid] - if node not in self.depedency: + if node not in self.dependency: visited.add(node) row.append(self.c2py[node].name) else: - for other in self.depedency[node]: + for other in self.dependency[node]: visited.add(other) row.append(self.c2py[other].name) csv_w.writerow(row) From a90c35ee4865f8b5224a5e21b6aa9251cc90eaa7 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 18 May 2020 04:04:51 +0000 Subject: [PATCH 10/47] Fix a small issue. model should be set to eval mode before the jit.trace call. Signed-off-by: Ningxin --- .../pynni/nni/analysis_utils/topology/torch/graph_from_trace.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 46454d6d23..2b16a8b0fa 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -74,6 +74,8 @@ def __init__(self, model, data): """ self.model = model self.data = data + # set to the evaluation mode + self.model.eval() self.traced_model = jit.trace(model, data) self.forward_edge = {} self.graph = self.traced_model.graph From 1909ff00ac8fa8a0c41c54bd4d35d12af3b27d1d Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 19 May 2020 06:38:58 +0000 Subject: [PATCH 11/47] Fix a small issue. Signed-off-by: Ningxin --- .../sensitivity/torch/sensitivity_analysis.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py index 8a13c30f8b..a6e12dcba7 100644 --- a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -127,7 +127,7 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): namelist = list(self.target_layer.keys()) for layerid in range(start, end): name = namelist[layerid] - self.sensitivities[name] = {0.0: self.ori_acc} + self.sensitivities[name] = {} for sparsity in self.sparsities: # Calculate the actual prune ratio based on the already pruned ratio sparsity = ( @@ -186,6 +186,10 @@ def visualization(self, outdir, merge=False): X = list(self.sensitivities[name].keys()) X = sorted(X) Y = [self.sensitivities[name][x] for x in X] + if 0.00 not in X: + # add the original accuracy into the figure + X = [0.00] + X + Y = [self.ori_acc] + Y plt.figure(figsize=(8, 4)) plt.plot(X, Y, marker='*') plt.xlabel('Prune Ratio') @@ -202,6 +206,10 @@ def visualization(self, outdir, merge=False): X = list(self.sensitivities[name].keys()) X = sorted(X) Y = [self.sensitivities[name][x] for x in X] + if 0.00 not in X: + # add the original accuracy into the figure + X = [0.00] + X + Y = [self.ori_acc] + Y linestyle = LineStyles[styleid % len(LineStyles)] marker = Markers[styleid % len(Markers)] plt.plot(X, Y, label=name, linestyle=linestyle, marker=marker) From 2d13ddaef6c5a3fcba0448c00b5e41122d1389be Mon Sep 17 00:00:00 2001 From: Ningxin Date: Wed, 20 May 2020 07:18:10 +0000 Subject: [PATCH 12/47] Fix bug. In the original way, addmm will also triger the dependency set searching, which may lead to a wrong dependency set. Signed-off-by: Ningxin --- .../nni/analysis_utils/topology/torch/shape_dependency.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py index 9634687e66..39ad84c285 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py @@ -11,7 +11,8 @@ CONV_TYPE = 'aten::_convolution' - +ADD_TYPES = ['aten::add', 'aten::add_'] +CAT_TYPE = 'aten::cat' logger = logging.getLogger('Shape_Dependency') @@ -76,9 +77,9 @@ def build_channel_dependency(self): """ for node in self.cnodes: parent_convs = [] - if 'add' in node.kind(): + if node.kind() in ADD_TYPES: parent_convs = self.get_parent_convs(node) - if 'cat' in node.kind(): + if node.kind() == CAT_TYPE: cat_dim = list(node.inputs())[1].toIValue() # N * C * H * W if cat_dim != 1: From 0e7962478c87f44e240cd0d149a865c303be2b43 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 21 May 2020 03:58:58 +0000 Subject: [PATCH 13/47] Add compatibility with versions prior to torch-1.4.0. Signed-off-by: Ningxin --- .../topology/torch/graph_from_trace.py | 29 +++++++++---------- .../topology/torch/shape_dependency.py | 2 +- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 2b16a8b0fa..41e2033715 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -8,12 +8,11 @@ import logging import torch.nn as nn import torch.jit as jit -from torch.utils.tensorboard._pytorch_graph import CLASSTYPE_KIND, GETATTR_KIND -__all__ = ["PyNode", "GraphBuilder"] +__all__ = ["VisualGraph"] TUPLE_UNPACK = 'prim::TupleUnpack' - +CLASSTYPE_KIND = 'ClassType' logger = logging.getLogger('Graph_From_Trace') @@ -29,9 +28,12 @@ def __init__(self, cnode, isValue=False): self.shape = self.cnode.type().sizes() if self.isOp: scopename = cnode.scopeName() - scopename = re.split('/', scopename) - # note, the scopeName of node may be empty - self.name = scopename[-1] if len(scopename) > 0 else '' + if torch.__version__ >= '1.4.0': + # note, the scopeName of node may be empty + scopename = re.split('/', scopename) + self.name = scopename[-1] if len(scopename) > 0 else '' + else: + self.name = '.'.join(re.findall(r'\[(.*?)\]', scopename)) # remove the __module prefix if self.name.startswith('__module.'): self.name = self.name[len('__module.'):] @@ -54,7 +56,7 @@ def parents(self): return [self.cnode.node()] -class GraphBuilder: +class VisualGraph: def __init__(self, model, data): """ We build the network architecture graph according the graph @@ -74,14 +76,11 @@ def __init__(self, model, data): """ self.model = model self.data = data - # set to the evaluation mode - self.model.eval() - self.traced_model = jit.trace(model, data) - self.forward_edge = {} - self.graph = self.traced_model.graph - # Start from pytorch 1.4.0, we need this function to get more - # detail information - torch._C._jit_pass_inline(self.graph) + with torch.onnx.set_training(model, False): + self.traced_model = jit.trace(model, data) + self.graph = self.traced_model.graph + torch._C._jit_pass_inline(self.graph) + self.forward_edge = {} self.c2py = {} self.visited = set() self.build_graph() diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py index 39ad84c285..aac0541773 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py @@ -29,7 +29,7 @@ def __init__(self, model, data): data: The example input data to trace the network architecture. """ - self.graph_builder = GraphBuilder(model, data) + self.graph_builder = VisualGraph(model, data) self.cnodes = list(self.graph_builder.graph.nodes()) self.graph = self.graph_builder.graph self.forward_edge = self.graph_builder.forward_edge From 96cea745f72e46bfd86a0aa02b0b8815555496f9 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 25 May 2020 06:36:25 +0000 Subject: [PATCH 14/47] Add the mask conflict fix module. mask_conflict can fix the mask conflict of the layers that has channel dependency. This part should be called before the speedup function, so that, the speedup module can handle the model with residual connection/concat operations. Signed-off-by: Ningxin --- .../topology/torch/mask_conflict.py | 88 +++++++++++++++++++ .../topology/torch/shape_dependency.py | 28 ++++++ 2 files changed, 116 insertions(+) create mode 100644 src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py new file mode 100644 index 0000000000..91d3be2aaa --- /dev/null +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py @@ -0,0 +1,88 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +import torch +import logging +import numpy as np +from .shape_dependency import ChannelDependency +# logging.basicConfig(level = logging.DEBUG) +_logger = logging.getLogger('FixMaskConflict') + +class MaskConflict: + def __init__(self, model, dummy_input, mask_file): + """ + MaskConflict fix the mask conflict between the layers that + has channel dependecy with each other. + + Parameters + ---------- + model: + model to fix the mask conflict + dummy_input: + input example to trace the model + mask_file: + the path of the original mask file + """ + self.model = model + self.dummy_input = dummy_input + self.mask_file = mask_file + self.masks = torch.load(self.mask_file) + + def fix_mask_conflict(self): + """ + Fix the mask conflict before the mask inference for the layers that + has shape dependencies. This function should be called before the + mask inference of the 'speedup' module. + """ + channel_depen = ChannelDependency(self.model, self.dummy_input) + depen_sets = channel_depen.dependency_sets + for dset in depen_sets: + if len(dset) == 1: + # This layer has no channel dependency with other layers + continue + else: + channel_remain = set() + for name in dset: + if name not in self.masks: + # this layer is not pruned + continue + w_mask = self.masks[name]['weight'] + shape = w_mask.size() + count = np.prod(shape[1:]) + all_ones = [] + all_zeros = [] + for i in range(w_mask.size(0)): + _count = torch.sum(w_mask[i]) + if _count == count: + all_ones.append(i) + elif _count == 0: + all_zeros.append(i) + if len(all_ones) + len(all_zeros) < w_mask.size(0): + # In fine-grained pruning, there is no need to check + # the shape conflict + _logger.info(','.join(dset) + 'use fine-grained pruning') + break + else: + channel_remain.update(all_ones) + _logger.debug('Layer: '+name) + _logger.debug('Original pruned filters:' + str(all_zeros)) + # Update the masks for the layers in the dependency set + ori_channels = 0 + for name in dset: + mask = self.masks[name] + w_shape = mask['weight'].size() + ori_channels = w_shape[0] + for i in channel_remain: + mask['weight'][i] = torch.ones(w_shape[1:]) + if hasattr(mask, 'bias'): + mask['bias'][i] = 1 + _logger.info(','.join(dset)) + _logger.info('Pruned Filters after fixing conflict:') + pruned_filters = set(list(range(ori_channels)))-channel_remain + _logger.info(str(sorted(pruned_filters))) + return self.masks + + def export(self, path): + """ + Export the masks after fixing the conflict to file. + """ + torch.save(self.masks, path) \ No newline at end of file diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py index aac0541773..688eb7c1a1 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py @@ -153,5 +153,33 @@ def export(self, filepath): row.append(self.c2py[other].name) csv_w.writerow(row) + @property + def dependency_sets(self): + """ + Get the list of the dependency set. + + Returns + ------- + dependency_sets: + list of the dependency sets. For example, + [set(['conv1', 'conv2']), set(['conv3', 'conv4'])] + + """ + d_sets = [] + visited = set() + for node in self.cnodes: + if node.kind() != CONV_TYPE or node in visited: + continue + tmp_set = set() + if node not in self.dependency: + visited.add(node) + tmp_set.add(self.c2py[node].name) + else: + for other in self.dependency[node]: + visited.add(other) + tmp_set.add(self.c2py[other].name) + d_sets.append(tmp_set) + return d_sets + From 6029603a134c1d4dc5f36944bc1a031524d18970 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 25 May 2020 07:44:57 +0000 Subject: [PATCH 15/47] Update the interface. update the interface. if we alreay have the traced graph of the model we donnot need to trace the model again. Signed-off-by: Ningxin --- .../topology/torch/graph_from_trace.py | 23 ++++++++++++------- .../topology/torch/mask_conflict.py | 16 +++++++++++-- .../topology/torch/shape_dependency.py | 7 ++++-- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 41e2033715..3ad9db966f 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -57,7 +57,7 @@ def parents(self): class VisualGraph: - def __init__(self, model, data): + def __init__(self, model=None, data=None, graph=None): """ We build the network architecture graph according the graph in the scriptmodule. However, the original graph from jit.trace @@ -72,14 +72,21 @@ def __init__(self, model, data): The model to build the network architecture. data: The sample input data for the model. - - """ + graph: + Traced graph from jit.trace, if this option is set, + we donnot need to trace the model again. + """ self.model = model - self.data = data - with torch.onnx.set_training(model, False): - self.traced_model = jit.trace(model, data) - self.graph = self.traced_model.graph - torch._C._jit_pass_inline(self.graph) + self.data = data + if graph is not None: + self.graph = graph + elif (model is not None) and (data is not None): + with torch.onnx.set_training(model, False): + self.traced_model = jit.trace(model, data) + self.graph = self.traced_model.graph + torch._C._jit_pass_inline(self.graph) + else: + raise Exception('Input parameters invalid!') self.forward_edge = {} self.c2py = {} self.visited = set() diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py index 91d3be2aaa..4ff35f5e5b 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py @@ -8,7 +8,7 @@ _logger = logging.getLogger('FixMaskConflict') class MaskConflict: - def __init__(self, model, dummy_input, mask_file): + def __init__(self, mask_file, model=None, dummy_input=None, graph=None): """ MaskConflict fix the mask conflict between the layers that has channel dependecy with each other. @@ -21,9 +21,21 @@ def __init__(self, model, dummy_input, mask_file): input example to trace the model mask_file: the path of the original mask file + graph: + the traced graph of the target model, is this parameter is not None, + we donnot use the model and dummpy_input to get the trace graph. """ + # check if the parameters are valid + parameter_valid = False + if graph is not None: + parameter_valid = True + elif (model is not None) and (dummy_input is not None): + parameter_valid = True + if not parameter_valid: + raise Exception('The input parameters is invalid!') self.model = model self.dummy_input = dummy_input + self.graph = graph self.mask_file = mask_file self.masks = torch.load(self.mask_file) @@ -33,7 +45,7 @@ def fix_mask_conflict(self): has shape dependencies. This function should be called before the mask inference of the 'speedup' module. """ - channel_depen = ChannelDependency(self.model, self.dummy_input) + channel_depen = ChannelDependency(self.model, self.dummy_input, self.graph) depen_sets = channel_depen.dependency_sets for dset in depen_sets: if len(dset) == 1: diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py index 688eb7c1a1..3a4ce4a3ce 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py @@ -17,7 +17,7 @@ class ChannelDependency: - def __init__(self, model, data): + def __init__(self, model=None, data=None, graph=None): """ This model analyze the channel dependencis between the conv layers in a model. @@ -28,8 +28,11 @@ def __init__(self, model, data): The model to be analyzed. data: The example input data to trace the network architecture. + graph: + if we alreay has the traced graph of the target model, we donnot + need to trace the model again. """ - self.graph_builder = VisualGraph(model, data) + self.graph_builder = VisualGraph(model, data, graph) self.cnodes = list(self.graph_builder.graph.nodes()) self.graph = self.graph_builder.graph self.forward_edge = self.graph_builder.forward_edge From 9beb1e2453f5e1c90832c19eb1e9c61419dc1dcf Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 26 May 2020 06:58:44 +0000 Subject: [PATCH 16/47] Add unit test for analysis_utils. Add unittest for tools in analysis_utils to verify the correctness of the visulization, channel dependency, and mask conflict. Signed-off-by: Ningxin --- src/sdk/pynni/tests/test_analysis_utils.py | 142 +++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 src/sdk/pynni/tests/test_analysis_utils.py diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py new file mode 100644 index 0000000000..f84e95ab15 --- /dev/null +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -0,0 +1,142 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import os +import torch +import torchvision +import torch.nn as nn +import torch.nn.functional as F +import torchvision.models as models +import math +import numpy as np +import unittest +from unittest import TestCase, main +from nni.compression.torch import L1FilterPruner +from nni.analysis_utils.topology.torch.graph_from_trace import VisualGraph +from nni.analysis_utils.topology.torch.shape_dependency import ChannelDependency +from nni.analysis_utils.topology.torch.mask_conflict import MaskConflict +from nni.analysis_utils.sensitivity.torch.sensitivity_analysis import SensitivityAnalysis + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +prefix = 'analysis_test' +model_names = ['alexnet', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg19', + 'resnet18', 'resnet34', 'squeezenet1_1', 'googlenet', + 'shufflenet_v2_x1_0', 'mobilenet_v2', 'wide_resnet50_2'] + +channel_dependency_ground_truth = { + 'resnet18': [{'layer1.0.conv2', 'layer1.1.conv2', 'conv1'}, + {'layer2.1.conv2', 'layer2.0.conv2', 'layer2.0.downsample.0'}, + {'layer3.0.downsample.0', 'layer3.1.conv2', 'layer3.0.conv2'}, + {'layer4.0.downsample.0', 'layer4.1.conv2', 'layer4.0.conv2'}], + 'resnet34': [{'conv1', 'layer1.2.conv2', 'layer1.1.conv2', 'layer1.0.conv2'}, + {'layer2.3.conv2', 'layer2.0.conv2', 'layer2.0.downsample.0','layer2.1.conv2', 'layer2.2.conv2'}, + {'layer3.3.conv2', 'layer3.0.conv2', 'layer3.4.conv2', 'layer3.0.downsample.0', 'layer3.5.conv2', 'layer3.1.conv2', 'layer3.2.conv2'}, + {'layer4.0.downsample.0', 'layer4.1.conv2', 'layer4.2.conv2', 'layer4.0.conv2'}], + 'mobilenet_v2': [{'features.3.conv.2', 'features.2.conv.2'}, + {'features.6.conv.2', 'features.4.conv.2', 'features.5.conv.2'}, + {'features.8.conv.2', 'features.7.conv.2', 'features.10.conv.2', 'features.9.conv.2'}, + {'features.11.conv.2', 'features.13.conv.2', 'features.12.conv.2'}, + {'features.14.conv.2', 'features.16.conv.2', 'features.15.conv.2'}], + 'wide_resnet50_2': [{'layer1.2.conv3', 'layer1.1.conv3', 'layer1.0.conv3', 'layer1.0.downsample.0'}, + {'layer2.1.conv3', 'layer2.0.conv3', 'layer2.0.downsample.0', 'layer2.2.conv3', 'layer2.3.conv3'}, + {'layer3.3.conv3', 'layer3.0.conv3', 'layer3.2.conv3', 'layer3.0.downsample.0', 'layer3.1.conv3', 'layer3.4.conv3', 'layer3.5.conv3'}, + {'layer4.1.conv3', 'layer4.2.conv3', 'layer4.0.downsample.0', 'layer4.0.conv3'}], + 'alexnet': [], + 'vgg11': [], + 'vgg11_bn': [], + 'vgg13': [], + 'vgg19': [], + 'squeezenet1_1': [], + 'googlenet': [], + 'shufflenet_v2_x1_0': [] +} + +unittest.TestLoader.sortTestMethodsUsing = None + +class AnalysisUtilsTest(TestCase): + + def test_channel_dependency(self): + outdir = os.path.join(prefix, 'dependency') + os.makedirs(outdir, exist_ok=True) + for name in model_names: + print('Analyze channel dependency for %s' % name) + model = getattr(models, name) + net = model(pretrained=True).to(device) + dummy_input = torch.ones(1, 3, 224, 224).to(device) + channel_depen = ChannelDependency(net, dummy_input) + depen_sets = channel_depen.dependency_sets + d_set_count = 0 + for d_set in depen_sets: + if len(d_set) > 1: + d_set_count += 1 + assert d_set in channel_dependency_ground_truth[name] + assert d_set_count == len(channel_dependency_ground_truth[name]) + fpath = os.path.join(outdir, name) + channel_depen.export(fpath) + + def test_visulization(self): + outdir = os.path.join(prefix, 'visual') + os.makedirs(outdir, exist_ok=True) + for name in model_names: + print('Visualization for %s' % name) + model = getattr(models, name) + net = model().to(device) + dummy_input = torch.ones(1, 3, 224, 224).to(device) + vg = VisualGraph(net, dummy_input) + picpath = os.path.join(outdir, name) + depen_file = os.path.join('analysis_test/dependency', name) + vg.visualization(picpath, dependency_file=depen_file) + + def get_pruned_index(self, mask): + pruned_indexes = [] + shape = mask.size() + for i in range(shape[0]): + if torch.sum(mask[i]).item() == 0: + pruned_indexes.append(i) + + return pruned_indexes + + def test_mask_conflict(self): + outdir = os.path.join(prefix, 'masks') + os.makedirs(outdir, exist_ok=True) + for name in model_names: + print('Test mask conflict for %s' % name) + model = getattr(models, name) + net = model().to(device) + dummy_input = torch.ones(1, 3, 224, 224).to(device) + # random generate the prune sparsity for each layer + cfglist = [] + for layername, layer in net.named_modules(): + if isinstance(layer, nn.Conv2d): + # pruner cannot allow the sparsity to be 0 or 1 + sparsity = np.random.uniform(0.01, 0.99) + cfg = {'op_types':['Conv2d'], 'op_names':[layername], 'sparsity':sparsity} + cfglist.append(cfg) + pruner = L1FilterPruner(net, cfglist) + pruner.compress() + ck_file = os.path.join(outdir, '%s.pth' % name) + mask_file = os.path.join(outdir, '%s_mask' % name) + pruner.export_model(ck_file, mask_file) + pruner._unwrap_model() + # Fix the mask conflict + mf = MaskConflict(mask_file, net, dummy_input) + fixed_mask = mf.fix_mask_conflict() + mf.export(os.path.join(outdir, '%s_fixed_mask' % name)) + # use the channel dependency groud truth to check if + # fix the mask conflict successfully + for dset in channel_dependency_ground_truth[name]: + lset = list(dset) + for i in range(len(lset)): + assert fixed_mask[lset[0]]['weight'].size(0) == fixed_mask[lset[i]]['weight'].size(0) + w_index1 = self.get_pruned_index(fixed_mask[lset[0]]['weight']) + w_index2 = self.get_pruned_index(fixed_mask[lset[i]]['weight']) + assert w_index1 == w_index2 + if hasattr(fixed_mask[lset[0]], 'bias'): + b_index1 = self.get_pruned_index(fixed_mask[lset[0]]['bias']) + b_index2 = self.get_pruned_index(fixed_mask[lset[i]]['bias']) + assert b_index1 == b_index2 + + + +if __name__ == '__main__': + main() From 6b25ff3636c5c5753e9e554744a3ab5371e00eba Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 28 May 2020 03:27:38 +0000 Subject: [PATCH 17/47] Fix the format warnings from pylint. Signed-off-by: Ningxin --- .../sensitivity/torch/sensitivity_analysis.py | 43 ++++----- .../topology/torch/graph_from_trace.py | 95 ++++++++++--------- .../topology/torch/mask_conflict.py | 94 +++++++++--------- .../topology/torch/shape_dependency.py | 47 ++++----- src/sdk/pynni/requirements.txt | 3 + src/sdk/pynni/tests/test_analysis_utils.py | 49 ++++++---- 6 files changed, 170 insertions(+), 161 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py index a6e12dcba7..27d40350ce 100644 --- a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -2,18 +2,23 @@ # Licensed under the MIT license. import os -import torch import copy import csv import logging +from collections import OrderedDict +import matplotlib +import matplotlib.pyplot as plt +from matplotlib.lines import Line2D + import numpy as np import torch.nn as nn -from collections import OrderedDict + from nni.compression.torch import LevelPruner from nni.compression.torch import L1FilterPruner from nni.compression.torch import L2FilterPruner - +# use Agg backend +matplotlib.use('Agg') SUPPORTED_OP_NAME = ['Conv2d', 'Conv1d'] SUPPORTED_OP_TYPE = [getattr(nn, name) for name in SUPPORTED_OP_NAME] @@ -30,12 +35,12 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop model: the model to perform sensitivity analysis val_func: - validation function for the model. Due to + validation function for the model. Due to different models may need different dataset/criterion , therefore the user need to cover this part by themselves. - val_func take the model as the first input parameter, and + val_func take the model as the first input parameter, and return the accuracy as output. - sparsities: + sparsities: The sparsity list provided by users. prune_type: The pruner type used to prune the conv layers, default is 'l1', @@ -87,18 +92,18 @@ def model_parse(self): def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): """ - This function analyze the sensitivity to pruning for + This function analyze the sensitivity to pruning for each conv layer in the target model. If %start and %end are not set, we analyze all the conv - layers by default. Users can specify several layers to + layers by default. Users can specify several layers to analyze or parallelize the analysis process easily through the %start and %end parameter. Parameters ---------- - start: + start: Layer index of the sensitivity analysis start - end: + end: Layer index of the sensitivity analysis end val_args: args for the val_function @@ -110,7 +115,7 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): Returns ------- sensitivities: - dict object that stores the trajectory of the + dict object that stores the trajectory of the accuracy when the prune ratio changes """ if not end: @@ -140,8 +145,8 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): pruner = self.Pruner(self.model, cfg) pruner.compress() val_acc = self.val_func(*val_args, **val_kwargs) - logger.info('Layer: %s Sparsity: %.2f Accuracy: %.4f' % - (name, sparsity, val_acc)) + logger.info('Layer: %s Sparsity: %.2f Accuracy: %.4f', + name, sparsity, val_acc) self.sensitivities[name][sparsity] = val_acc pruner._unwrap_model() @@ -160,24 +165,18 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): def visualization(self, outdir, merge=False): """ - # Visualize the sensitivity curves of the model Parameters ---------- - outdir: + outdir: output directory of the image merge: - if merge all the sensitivity curves into a - single image. If not, we will draw a picture + if merge all the sensitivity curves into a + single image. If not, we will draw a picture for each target layer of the model. """ os.makedirs(outdir, exist_ok=True) - import matplotlib - # use Agg backend - matplotlib.use('Agg') - import matplotlib.pyplot as plt - from matplotlib.lines import Line2D LineStyles = [':', '-.', '--', '-'] Markers = list(Line2D.markers.keys()) if not merge: diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 3ad9db966f..95292ddc5c 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -4,10 +4,11 @@ import re import os import csv -import torch import logging -import torch.nn as nn +import torch import torch.jit as jit +import graphviz + __all__ = ["VisualGraph"] @@ -62,22 +63,22 @@ def __init__(self, model=None, data=None, graph=None): We build the network architecture graph according the graph in the scriptmodule. However, the original graph from jit.trace has lots of detailed information which make the graph complicated - and hard to understand. So we also store a copy of the network - architecture in the self.forward_edge. We will simplify the network - architecure (such as unpack_tuple, etc) stored in self.forward_edge + and hard to understand. So we also store a copy of the network + architecture in the self.forward_edge. We will simplify the network + architecure (such as unpack_tuple, etc) stored in self.forward_edge to make the graph more clear. Parameters ---------- - model: + model: The model to build the network architecture. - data: + data: The sample input data for the model. graph: Traced graph from jit.trace, if this option is set, we donnot need to trace the model again. - """ + """ self.model = model - self.data = data + self.data = data if graph is not None: self.graph = graph elif (model is not None) and (data is not None): @@ -87,7 +88,7 @@ def __init__(self, model=None, data=None, graph=None): torch._C._jit_pass_inline(self.graph) else: raise Exception('Input parameters invalid!') - self.forward_edge = {} + self.forward_edge = {} self.c2py = {} self.visited = set() self.build_graph() @@ -95,10 +96,11 @@ def __init__(self, model=None, data=None, graph=None): def unpack_tuple(self): """ - jit.trace also traces the tuple creation and unpack, which makes - the grapgh complex and difficult to understand. Therefore, we + jit.trace also traces the tuple creation and unpack, which makes + the grapgh complex and difficult to understand. Therefore, we unpack the tuple handly to make the graph clear. """ + parent_node = None for node in self.graph.nodes(): if node.kind() == TUPLE_UNPACK: in_tuple = list(node.inputs())[0] @@ -106,7 +108,7 @@ def unpack_tuple(self): in_tensors = list(parent_node.inputs()) out_tensors = list(node.outputs()) assert len(in_tensors) == len(out_tensors) - for i in range(len(in_tensors)): + for i, _ in enumerate(in_tensors): ori_edges = self.forward_edge[in_tensors[i]] # remove the out edge to the Tuple_construct OP node self.forward_edge[in_tensors[i]] = list( @@ -122,13 +124,13 @@ def build_graph(self): """ for node in self.graph.nodes(): self.c2py[node] = PyNode(node) - for input in node.inputs(): - if input not in self.c2py: - self.c2py[input] = PyNode(input, True) - if input in self.forward_edge: - self.forward_edge[input].append(node) + for _input in node.inputs(): + if _input not in self.c2py: + self.c2py[_input] = PyNode(_input, True) + if _input in self.forward_edge: + self.forward_edge[_input].append(node) else: - self.forward_edge[input] = [node] + self.forward_edge[_input] = [node] for output in node.outputs(): if output not in self.c2py: self.c2py[output] = PyNode(output, True) @@ -143,18 +145,18 @@ def visual_traverse(self, curnode, graph, lastnode, cfg): at the same time. Parameters ---------- - curnode: + curnode: Current visiting node(tensor/module). - graph: + graph: The handle of the Dgraph. - lastnode: + lastnode: The last visited node. cfg: - Dict object to specify the rendering + Dict object to specify the rendering configuration for operation node. key is the name of the operation, value is a also a dict. For example, - {'conv1': {'shape':'box', 'color':'red'}} + {'conv1': {'shape':'box', 'color':'red'}} """ if curnode in self.visited: if lastnode is not None: @@ -178,39 +180,38 @@ def visual_traverse(self, curnode, graph, lastnode, cfg): for _next in self.forward_edge[curnode]: self.visual_traverse(_next, graph, curnode, cfg) - def base_visualization(self, filename, format='jpg', cfg=None): + def base_visualization(self, filename, save_format='jpg', cfg=None): """ visualize the network architecture automaticlly. Parameters ---------- - filename: + filename: The filename of the saved image file. - format: - The output format. + save_format: + The output save_format. """ # TODO and detailed mode for the visualization function # in which the graph will also contain all the weights/bias # information. if not cfg: cfg = {} - import graphviz - graph = graphviz.Digraph(format=format) + graph = graphviz.Digraph(format=save_format) self.visited.clear() - for input in self.graph.inputs(): - if input.type().kind() == CLASSTYPE_KIND: + for _input in self.graph.inputs(): + if _input.type().kind() == CLASSTYPE_KIND: continue - self.visual_traverse(input, graph, None, cfg) + self.visual_traverse(_input, graph, None, cfg) graph.render(filename) - def visualize_with_flops(self, filepath, format, flops_file): + def visualize_with_flops(self, filepath, save_format, flops_file): assert os.path.exists(flops_file) f_handle = open(flops_file, 'r') csv_r = csv.reader(f_handle) flops = {} # skip the header of the csv file - header = next(csv_r) + _ = next(csv_r) for row in csv_r: - if(len(row) == 2): + if len(row) == 2: layername = row[0] _flops = float(row[1]) flops[layername] = _flops @@ -233,14 +234,14 @@ def visualize_with_flops(self, filepath, format, flops_file): render_cfg = render_cfg = {'shape': 'ellipse', 'fillcolor': "/reds9/"+str(flops_level), 'style': 'filled'} cfgs[layername] = render_cfg - self.base_visualization(filepath, format=format, cfg=cfgs) + self.base_visualization(filepath, save_format=save_format, cfg=cfgs) - def visualize_with_dependency(self, filepath, format, dependency_file): + def visualize_with_dependency(self, filepath, save_format, dependency_file): assert os.path.exists(dependency_file) f_handle = open(dependency_file, 'r') csv_r = csv.reader(f_handle) # skip the header of the csv file - header = next(csv_r) + _ = next(csv_r) dependency_sets = [] for row in csv_r: tmp_set = set() @@ -262,9 +263,9 @@ def visualize_with_dependency(self, filepath, format, dependency_file): render_cfg = {'shape': 'ellipse', 'fillcolor': str_color, 'style': 'filled'} cfgs[layername] = render_cfg - self.base_visualization(filepath, format=format, cfg=cfgs) + self.base_visualization(filepath, save_format=save_format, cfg=cfgs) - def visualize_with_sensitivity(self, filepath, format, sensitivity_file): + def visualize_with_sensitivity(self, filepath, save_format, sensitivity_file): assert os.path.exists(sensitivity_file) f_handle = open(sensitivity_file, 'r') csv_r = csv.reader(f_handle) @@ -294,27 +295,27 @@ def visualize_with_sensitivity(self, filepath, format, sensitivity_file): render_cfg = {'shape': 'ellipse', 'fillcolor': str_color, 'style': 'filled'} cfgs[layername] = render_cfg - self.base_visualization(filepath, format=format, cfg=cfgs) + self.base_visualization(filepath, save_format=save_format, cfg=cfgs) - def visualization(self, filename, format='jpg', + def visualization(self, filename, save_format='jpg', flops_file=None, sensitivity_file=None, dependency_file=None): # First, visualize the network architecture only - self.base_visualization(filename, format=format) + self.base_visualization(filename, save_format=save_format) # if the flops file is specified, we also render # a image with the flops information. if flops_file is not None: flops_img = filename + '_flops' - self.visualize_with_flops(flops_img, format, flops_file) + self.visualize_with_flops(flops_img, save_format, flops_file) if dependency_file is not None: dependency_img = filename + '_dependency' self.visualize_with_dependency( - dependency_img, format, dependency_file) + dependency_img, save_format, dependency_file) if sensitivity_file is not None: sensitivity_img = filename + '_sensitivity' self.visualize_with_sensitivity( - sensitivity_img, format, sensitivity_file) + sensitivity_img, save_format, sensitivity_file) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py index 4ff35f5e5b..ee0ead97dc 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. -import torch import logging +import torch import numpy as np from .shape_dependency import ChannelDependency # logging.basicConfig(level = logging.DEBUG) @@ -16,11 +16,11 @@ def __init__(self, mask_file, model=None, dummy_input=None, graph=None): Parameters ---------- model: - model to fix the mask conflict + model to fix the mask conflict dummy_input: input example to trace the model mask_file: - the path of the original mask file + the path of the original mask file graph: the traced graph of the target model, is this parameter is not None, we donnot use the model and dummpy_input to get the trace graph. @@ -41,8 +41,8 @@ def __init__(self, mask_file, model=None, dummy_input=None, graph=None): def fix_mask_conflict(self): """ - Fix the mask conflict before the mask inference for the layers that - has shape dependencies. This function should be called before the + Fix the mask conflict before the mask inference for the layers that + has shape dependencies. This function should be called before the mask inference of the 'speedup' module. """ channel_depen = ChannelDependency(self.model, self.dummy_input, self.graph) @@ -51,50 +51,52 @@ def fix_mask_conflict(self): if len(dset) == 1: # This layer has no channel dependency with other layers continue - else: - channel_remain = set() - for name in dset: - if name not in self.masks: - # this layer is not pruned - continue - w_mask = self.masks[name]['weight'] - shape = w_mask.size() - count = np.prod(shape[1:]) - all_ones = [] - all_zeros = [] - for i in range(w_mask.size(0)): - _count = torch.sum(w_mask[i]) - if _count == count: - all_ones.append(i) - elif _count == 0: - all_zeros.append(i) - if len(all_ones) + len(all_zeros) < w_mask.size(0): - # In fine-grained pruning, there is no need to check - # the shape conflict - _logger.info(','.join(dset) + 'use fine-grained pruning') - break - else: - channel_remain.update(all_ones) - _logger.debug('Layer: '+name) - _logger.debug('Original pruned filters:' + str(all_zeros)) - # Update the masks for the layers in the dependency set - ori_channels = 0 - for name in dset: - mask = self.masks[name] - w_shape = mask['weight'].size() - ori_channels = w_shape[0] - for i in channel_remain: - mask['weight'][i] = torch.ones(w_shape[1:]) - if hasattr(mask, 'bias'): - mask['bias'][i] = 1 - _logger.info(','.join(dset)) - _logger.info('Pruned Filters after fixing conflict:') - pruned_filters = set(list(range(ori_channels)))-channel_remain - _logger.info(str(sorted(pruned_filters))) + channel_remain = set() + fine_grained = False + for name in dset: + if name not in self.masks: + # this layer is not pruned + continue + w_mask = self.masks[name]['weight'] + shape = w_mask.size() + count = np.prod(shape[1:]) + all_ones = [] + all_zeros = [] + for i in range(w_mask.size(0)): + _count = torch.sum(w_mask[i]) + if _count == count: + all_ones.append(i) + elif _count == 0: + all_zeros.append(i) + if len(all_ones) + len(all_zeros) < w_mask.size(0): + # In fine-grained pruning, there is no need to check + # the shape conflict + _logger.info('Layers %s using fine-grained pruning', ','.join(dset)) + fine_grained = True + break + channel_remain.update(all_ones) + _logger.debug('Layer: %s ', name) + _logger.debug('Original pruned filters: %s', str(all_zeros)) + # Update the masks for the layers in the dependency set + if fine_grained: + continue + ori_channels = 0 + for name in dset: + mask = self.masks[name] + w_shape = mask['weight'].size() + ori_channels = w_shape[0] + for i in channel_remain: + mask['weight'][i] = torch.ones(w_shape[1:]) + if hasattr(mask, 'bias'): + mask['bias'][i] = 1 + _logger.info(','.join(dset)) + _logger.info('Pruned Filters after fixing conflict:') + pruned_filters = set(list(range(ori_channels)))-channel_remain + _logger.info(str(sorted(pruned_filters))) return self.masks def export(self, path): """ Export the masks after fixing the conflict to file. """ - torch.save(self.masks, path) \ No newline at end of file + torch.save(self.masks, path) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py index 3a4ce4a3ce..2a69dfcd18 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py @@ -2,12 +2,10 @@ # Licensed under the MIT license. import csv -import torch -import queue import logging -import torch.nn as nn -from .graph_from_trace import * + +from .graph_from_trace import VisualGraph CONV_TYPE = 'aten::_convolution' @@ -23,10 +21,10 @@ def __init__(self, model=None, data=None, graph=None): layers in a model. Parameters - ---------- - model: + ---------- + model: The model to be analyzed. - data: + data: The example input data to trace the network architecture. graph: if we alreay has the traced graph of the target model, we donnot @@ -39,7 +37,7 @@ def __init__(self, model=None, data=None, graph=None): self.c2py = self.graph_builder.c2py self.dependency = {} self.build_channel_dependency() - + def get_parent_convs(self, node): """ @@ -49,7 +47,7 @@ def get_parent_convs(self, node): --------- node: target node. - + Returns ------- parent_convs: @@ -89,29 +87,29 @@ def build_channel_dependency(self): parent_convs = self.get_parent_convs(node) dependency_set = set(parent_convs) # merge the dependencies - for node in parent_convs: - if node in self.dependency: - dependency_set.update(self.dependency[node]) + for parent in parent_convs: + if parent in self.dependency: + dependency_set.update(self.dependency[parent]) # save the dependencies - for node in dependency_set: - self.dependency[node] = dependency_set + for _node in dependency_set: + self.dependency[_node] = dependency_set def filter_prune_check(self, ratios): """ According to the channel dependencies between the conv - layers, check if the filter pruning ratio for the conv + layers, check if the filter pruning ratio for the conv layers is legal. Parameters --------- - ratios: - the prune ratios for the layers. %ratios is a dict, - in which the keys are the names of the target layer - and the values are the prune ratio for the corresponding + ratios: + the prune ratios for the layers. %ratios is a dict, + in which the keys are the names of the target layer + and the values are the prune ratio for the corresponding layers. For example: ratios = {'body.conv1': 0.5, 'body.conv2':0.5} - Note: the name of the layers should looks like - the names that model.named_modules() functions + Note: the name of the layers should looks like + the names that model.named_modules() functions returns. Returns @@ -133,7 +131,7 @@ def filter_prune_check(self, ratios): return True def export(self, filepath): - """ + """ export the channel dependencies as a csv file. """ header = ['Dependency Set', 'Convolutional Layers'] @@ -160,7 +158,7 @@ def export(self, filepath): def dependency_sets(self): """ Get the list of the dependency set. - + Returns ------- dependency_sets: @@ -183,6 +181,3 @@ def dependency_sets(self): tmp_set.add(self.c2py[other].name) d_sets.append(tmp_set) return d_sets - - - diff --git a/src/sdk/pynni/requirements.txt b/src/sdk/pynni/requirements.txt index 885c909ca8..b786de051e 100644 --- a/src/sdk/pynni/requirements.txt +++ b/src/sdk/pynni/requirements.txt @@ -8,3 +8,6 @@ hyperopt==0.1.2 # metis tuner scikit-learn==0.20 + +# analysis_utils +graphviz diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py index f84e95ab15..7b7d6118f6 100644 --- a/src/sdk/pynni/tests/test_analysis_utils.py +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -2,20 +2,17 @@ # Licensed under the MIT license. import os +import unittest +from unittest import TestCase, main import torch -import torchvision import torch.nn as nn -import torch.nn.functional as F import torchvision.models as models -import math import numpy as np -import unittest -from unittest import TestCase, main + from nni.compression.torch import L1FilterPruner from nni.analysis_utils.topology.torch.graph_from_trace import VisualGraph from nni.analysis_utils.topology.torch.shape_dependency import ChannelDependency from nni.analysis_utils.topology.torch.mask_conflict import MaskConflict -from nni.analysis_utils.sensitivity.torch.sensitivity_analysis import SensitivityAnalysis device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') prefix = 'analysis_test' @@ -29,17 +26,23 @@ {'layer3.0.downsample.0', 'layer3.1.conv2', 'layer3.0.conv2'}, {'layer4.0.downsample.0', 'layer4.1.conv2', 'layer4.0.conv2'}], 'resnet34': [{'conv1', 'layer1.2.conv2', 'layer1.1.conv2', 'layer1.0.conv2'}, - {'layer2.3.conv2', 'layer2.0.conv2', 'layer2.0.downsample.0','layer2.1.conv2', 'layer2.2.conv2'}, - {'layer3.3.conv2', 'layer3.0.conv2', 'layer3.4.conv2', 'layer3.0.downsample.0', 'layer3.5.conv2', 'layer3.1.conv2', 'layer3.2.conv2'}, + {'layer2.3.conv2', 'layer2.0.conv2', 'layer2.0.downsample.0', + 'layer2.1.conv2', 'layer2.2.conv2'}, + {'layer3.3.conv2', 'layer3.0.conv2', 'layer3.4.conv2', 'layer3.0.downsample.0', + 'layer3.5.conv2', 'layer3.1.conv2', 'layer3.2.conv2'}, {'layer4.0.downsample.0', 'layer4.1.conv2', 'layer4.2.conv2', 'layer4.0.conv2'}], 'mobilenet_v2': [{'features.3.conv.2', 'features.2.conv.2'}, {'features.6.conv.2', 'features.4.conv.2', 'features.5.conv.2'}, - {'features.8.conv.2', 'features.7.conv.2', 'features.10.conv.2', 'features.9.conv.2'}, - {'features.11.conv.2', 'features.13.conv.2', 'features.12.conv.2'}, + {'features.8.conv.2', 'features.7.conv.2', + 'features.10.conv.2', 'features.9.conv.2'}, + {'features.11.conv.2', 'features.13.conv.2', + 'features.12.conv.2'}, {'features.14.conv.2', 'features.16.conv.2', 'features.15.conv.2'}], 'wide_resnet50_2': [{'layer1.2.conv3', 'layer1.1.conv3', 'layer1.0.conv3', 'layer1.0.downsample.0'}, - {'layer2.1.conv3', 'layer2.0.conv3', 'layer2.0.downsample.0', 'layer2.2.conv3', 'layer2.3.conv3'}, - {'layer3.3.conv3', 'layer3.0.conv3', 'layer3.2.conv3', 'layer3.0.downsample.0', 'layer3.1.conv3', 'layer3.4.conv3', 'layer3.5.conv3'}, + {'layer2.1.conv3', 'layer2.0.conv3', 'layer2.0.downsample.0', + 'layer2.2.conv3', 'layer2.3.conv3'}, + {'layer3.3.conv3', 'layer3.0.conv3', 'layer3.2.conv3', 'layer3.0.downsample.0', + 'layer3.1.conv3', 'layer3.4.conv3', 'layer3.5.conv3'}, {'layer4.1.conv3', 'layer4.2.conv3', 'layer4.0.downsample.0', 'layer4.0.conv3'}], 'alexnet': [], 'vgg11': [], @@ -53,6 +56,7 @@ unittest.TestLoader.sortTestMethodsUsing = None + class AnalysisUtilsTest(TestCase): def test_channel_dependency(self): @@ -110,7 +114,8 @@ def test_mask_conflict(self): if isinstance(layer, nn.Conv2d): # pruner cannot allow the sparsity to be 0 or 1 sparsity = np.random.uniform(0.01, 0.99) - cfg = {'op_types':['Conv2d'], 'op_names':[layername], 'sparsity':sparsity} + cfg = {'op_types': ['Conv2d'], 'op_names': [ + layername], 'sparsity': sparsity} cfglist.append(cfg) pruner = L1FilterPruner(net, cfglist) pruner.compress() @@ -126,17 +131,21 @@ def test_mask_conflict(self): # fix the mask conflict successfully for dset in channel_dependency_ground_truth[name]: lset = list(dset) - for i in range(len(lset)): - assert fixed_mask[lset[0]]['weight'].size(0) == fixed_mask[lset[i]]['weight'].size(0) - w_index1 = self.get_pruned_index(fixed_mask[lset[0]]['weight']) - w_index2 = self.get_pruned_index(fixed_mask[lset[i]]['weight']) + for i, _ in enumerate(lset): + assert fixed_mask[lset[0]]['weight'].size( + 0) == fixed_mask[lset[i]]['weight'].size(0) + w_index1 = self.get_pruned_index( + fixed_mask[lset[0]]['weight']) + w_index2 = self.get_pruned_index( + fixed_mask[lset[i]]['weight']) assert w_index1 == w_index2 if hasattr(fixed_mask[lset[0]], 'bias'): - b_index1 = self.get_pruned_index(fixed_mask[lset[0]]['bias']) - b_index2 = self.get_pruned_index(fixed_mask[lset[i]]['bias']) + b_index1 = self.get_pruned_index( + fixed_mask[lset[0]]['bias']) + b_index2 = self.get_pruned_index( + fixed_mask[lset[i]]['bias']) assert b_index1 == b_index2 - if __name__ == '__main__': main() From d0bda49e150c90ae5ed5859f7cca551e9c62914f Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 28 May 2020 03:59:47 +0000 Subject: [PATCH 18/47] Add dependencies. Signed-off-by: Ningxin --- deployment/pypi/setup.py | 4 +++- setup.py | 4 +++- src/sdk/pynni/requirements.txt | 1 + src/sdk/pynni/setup.py | 4 +++- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/deployment/pypi/setup.py b/deployment/pypi/setup.py index 3c2d433790..356612db62 100644 --- a/deployment/pypi/setup.py +++ b/deployment/pypi/setup.py @@ -62,7 +62,9 @@ 'scipy', 'coverage', 'colorama', - 'scikit-learn>=0.20,<0.22' + 'scikit-learn>=0.20,<0.22', + 'graphviz', + 'matplotlib' ], classifiers = [ 'Programming Language :: Python :: 3', diff --git a/setup.py b/setup.py index 8a3733776f..1ddd057873 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,9 @@ def read(fname): 'schema', 'PythonWebHDFS', 'colorama', - 'scikit-learn>=0.20,<0.22' + 'scikit-learn>=0.20,<0.22', + 'graphviz', + 'matplotlib' ], entry_points = { diff --git a/src/sdk/pynni/requirements.txt b/src/sdk/pynni/requirements.txt index b786de051e..b7d9b0cf90 100644 --- a/src/sdk/pynni/requirements.txt +++ b/src/sdk/pynni/requirements.txt @@ -11,3 +11,4 @@ scikit-learn==0.20 # analysis_utils graphviz +matplotlib diff --git a/src/sdk/pynni/setup.py b/src/sdk/pynni/setup.py index 4e006a9014..41a603023b 100644 --- a/src/sdk/pynni/setup.py +++ b/src/sdk/pynni/setup.py @@ -18,7 +18,9 @@ def read(fname): 'json_tricks', 'numpy', 'scipy', - 'coverage' + 'coverage', + 'graphviz', + 'matplotlib' ], package_data = {'nni': ['**/requirements.txt']}, From 4154cf02a617ba191ab6b2065b35f58a294f3a1c Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 28 May 2020 05:16:29 +0000 Subject: [PATCH 19/47] comment the visualization test temporarily. Signed-off-by: Ningxin --- .../topology/torch/graph_from_trace.py | 2 +- .../topology/torch/shape_dependency.py | 2 +- src/sdk/pynni/tests/test_analysis_utils.py | 28 ++++++++++--------- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py index 95292ddc5c..be9c24e3a4 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py @@ -32,7 +32,7 @@ def __init__(self, cnode, isValue=False): if torch.__version__ >= '1.4.0': # note, the scopeName of node may be empty scopename = re.split('/', scopename) - self.name = scopename[-1] if len(scopename) > 0 else '' + self.name = scopename[-1] if scopename else '' else: self.name = '.'.join(re.findall(r'\[(.*?)\]', scopename)) # remove the __module prefix diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py index 2a69dfcd18..92f52607c1 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py @@ -56,7 +56,7 @@ def get_parent_convs(self, node): parent_convs = [] queue = [] queue.append(node) - while len(queue) > 0: + while queue: curnode = queue.pop(0) if curnode in self.c2py and self.c2py[curnode].isOp \ and curnode.kind() == CONV_TYPE: diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py index 7b7d6118f6..da80a56989 100644 --- a/src/sdk/pynni/tests/test_analysis_utils.py +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -77,19 +77,21 @@ def test_channel_dependency(self): assert d_set_count == len(channel_dependency_ground_truth[name]) fpath = os.path.join(outdir, name) channel_depen.export(fpath) - - def test_visulization(self): - outdir = os.path.join(prefix, 'visual') - os.makedirs(outdir, exist_ok=True) - for name in model_names: - print('Visualization for %s' % name) - model = getattr(models, name) - net = model().to(device) - dummy_input = torch.ones(1, 3, 224, 224).to(device) - vg = VisualGraph(net, dummy_input) - picpath = os.path.join(outdir, name) - depen_file = os.path.join('analysis_test/dependency', name) - vg.visualization(picpath, dependency_file=depen_file) + # comments the visulization test temporarily + # because, this test needs the graphviz package + # in ths os.(apt install graphviz) + # def test_visulization(self): + # outdir = os.path.join(prefix, 'visual') + # os.makedirs(outdir, exist_ok=True) + # for name in model_names: + # print('Visualization for %s' % name) + # model = getattr(models, name) + # net = model().to(device) + # dummy_input = torch.ones(1, 3, 224, 224).to(device) + # vg = VisualGraph(net, dummy_input) + # picpath = os.path.join(outdir, name) + # depen_file = os.path.join('analysis_test/dependency', name) + # vg.visualization(picpath, dependency_file=depen_file) def get_pruned_index(self, mask): pruned_indexes = [] From 83f0b268a086ea80cc85d628eb736e7d7e897ba9 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 28 May 2020 06:00:33 +0000 Subject: [PATCH 20/47] update --- src/sdk/pynni/tests/test_analysis_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py index da80a56989..413346b8c8 100644 --- a/src/sdk/pynni/tests/test_analysis_utils.py +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -65,7 +65,7 @@ def test_channel_dependency(self): for name in model_names: print('Analyze channel dependency for %s' % name) model = getattr(models, name) - net = model(pretrained=True).to(device) + net = model().to(device) dummy_input = torch.ones(1, 3, 224, 224).to(device) channel_depen = ChannelDependency(net, dummy_input) depen_sets = channel_depen.dependency_sets From 388056cb9776c4c0b2e76b27f5885a0e213c3713 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 28 May 2020 06:28:35 +0000 Subject: [PATCH 21/47] Skip the test when the torch version is too old. Signed-off-by: Ningxin --- src/sdk/pynni/tests/test_analysis_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py index 413346b8c8..5761172d0a 100644 --- a/src/sdk/pynni/tests/test_analysis_utils.py +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -58,7 +58,7 @@ class AnalysisUtilsTest(TestCase): - + @unittest.skipIf(torch.__version__ < "1.3.0", "not supported") def test_channel_dependency(self): outdir = os.path.join(prefix, 'dependency') os.makedirs(outdir, exist_ok=True) @@ -102,6 +102,7 @@ def get_pruned_index(self, mask): return pruned_indexes + @unittest.skipIf(torch.__version__ < "1.3.0", "not supported") def test_mask_conflict(self): outdir = os.path.join(prefix, 'masks') os.makedirs(outdir, exist_ok=True) From ccbcc6c99112a951c85c300dac53fc34ad80254a Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 28 May 2020 07:10:27 +0000 Subject: [PATCH 22/47] update --- src/sdk/pynni/tests/test_analysis_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py index 5761172d0a..ffb29c13ff 100644 --- a/src/sdk/pynni/tests/test_analysis_utils.py +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -17,7 +17,7 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') prefix = 'analysis_test' model_names = ['alexnet', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg19', - 'resnet18', 'resnet34', 'squeezenet1_1', 'googlenet', + 'resnet18', 'resnet34', 'squeezenet1_1', 'shufflenet_v2_x1_0', 'mobilenet_v2', 'wide_resnet50_2'] channel_dependency_ground_truth = { From 4ce82559cca2c675f294092160d894b169946379 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 1 Jun 2020 02:42:19 +0000 Subject: [PATCH 23/47] update according to the review comments. Signed-off-by: Ningxin --- .../analysis_utils/__init__.py | 0 .../analysis_utils/sensitivity/__init__.py | 0 .../sensitivity/torch/__init__.py | 0 .../sensitivity/torch/sensitivity_analysis.py | 93 ++++++++++--------- .../analysis_utils/topology/__init__.py | 0 .../analysis_utils/topology/torch/__init__.py | 0 .../topology/torch/graph_from_trace.py | 46 ++++----- .../topology/torch/mask_conflict.py | 24 ++--- .../topology/torch/shape_dependency.py | 52 +++++------ 9 files changed, 112 insertions(+), 103 deletions(-) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/__init__.py (100%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/sensitivity/__init__.py (100%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/sensitivity/torch/__init__.py (100%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/sensitivity/torch/sensitivity_analysis.py (76%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/topology/__init__.py (100%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/topology/torch/__init__.py (100%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/topology/torch/graph_from_trace.py (92%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/topology/torch/mask_conflict.py (85%) rename src/sdk/pynni/nni/{ => compression}/analysis_utils/topology/torch/shape_dependency.py (81%) diff --git a/src/sdk/pynni/nni/analysis_utils/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/__init__.py similarity index 100% rename from src/sdk/pynni/nni/analysis_utils/__init__.py rename to src/sdk/pynni/nni/compression/analysis_utils/__init__.py diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/__init__.py similarity index 100% rename from src/sdk/pynni/nni/analysis_utils/sensitivity/__init__.py rename to src/sdk/pynni/nni/compression/analysis_utils/sensitivity/__init__.py diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/__init__.py similarity index 100% rename from src/sdk/pynni/nni/analysis_utils/sensitivity/torch/__init__.py rename to src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/__init__.py diff --git a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py similarity index 76% rename from src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py rename to src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py index 27d40350ce..6e70e4a97d 100644 --- a/src/sdk/pynni/nni/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -32,25 +32,34 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop Perform sensitivity analysis for this model. Parameters ---------- - model: - the model to perform sensitivity analysis - val_func: - validation function for the model. Due to - different models may need different dataset/criterion - , therefore the user need to cover this part by themselves. - val_func take the model as the first input parameter, and - return the accuracy as output. - sparsities: - The sparsity list provided by users. - prune_type: - The pruner type used to prune the conv layers, default is 'l1', - and 'l2', 'fine-grained' is also supported. - early_stop: - If this flag is set, the sensitivity analysis - for a conv layer will early stop when the accuracy - drop already reach the value of early_stop (0.05 for example). - The default value is 1.0, which means the analysis won't stop - until all given sparsities are tested. + model : torch.nn.Module + the model to perform sensitivity analysis + val_func : function + validation function for the model. Due to + different models may need different dataset/criterion + , therefore the user need to cover this part by themselves. + In the val_func, the model should be tested on the validation dateset, + and the validation accuracy should be returned as the output of val_func. + There are no restrictions on the input parameters of the val_function. + User can use the val_args, val_kwargs parameters in analysis + to pass all the parameters that val_func needed. + + sparsities : list + The sparsity list provided by users. This parameter is set when the user + only wants to test some specific sparsities. In the sparsity list, each element + is a sparsity value which means how much weight the pruner should prune. Take + [0.25, 0.5, 0.75] for an example, the SensitivityAnalysis will prune 25% 50% 75% + weights gradually for each layer. + + prune_type : str + The pruner type used to prune the conv layers, default is 'l1', + and 'l2', 'fine-grained' is also supported. + early_stop : float + If this flag is set, the sensitivity analysis + for a conv layer will early stop when the accuracy + drop already reach the value of early_stop (0.05 for example). + The default value is 1.0, which means the analysis won't stop + until all given sparsities are tested. """ self.model = model @@ -94,29 +103,29 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): """ This function analyze the sensitivity to pruning for each conv layer in the target model. - If %start and %end are not set, we analyze all the conv + If start and end are not set, we analyze all the conv layers by default. Users can specify several layers to analyze or parallelize the analysis process easily through - the %start and %end parameter. + the start and end parameter. Parameters ---------- - start: - Layer index of the sensitivity analysis start - end: - Layer index of the sensitivity analysis end - val_args: - args for the val_function - val_kwargs: - kwargs for the val_funtion - The val_funtion will be called as: - val_function(*val_args, **val_kwargs) + val_args : list + args for the val_function + val_kwargs : dict + kwargs for the val_funtion + The val_funtion will be called as: + val_function(*val_args, **val_kwargs). + start : int + Layer index of the sensitivity analysis start. + end : int + Layer index of the sensitivity analysis end. Returns ------- - sensitivities: - dict object that stores the trajectory of the - accuracy when the prune ratio changes + sensitivities : dict + dict object that stores the trajectory of the + accuracy when the prune ratio changes """ if not end: end = self.layers_count @@ -169,12 +178,12 @@ def visualization(self, outdir, merge=False): Parameters ---------- - outdir: - output directory of the image - merge: - if merge all the sensitivity curves into a - single image. If not, we will draw a picture - for each target layer of the model. + outdir : str + output directory of the image + merge : bool + if merge all the sensitivity curves into a + single image. If not, we will draw a picture + for each target layer of the model. """ os.makedirs(outdir, exist_ok=True) LineStyles = [':', '-.', '--', '-'] @@ -228,8 +237,8 @@ def export(self, filepath): Parameters ---------- - filepath: - Path of the output file + filepath : str + Path of the output file """ str_sparsities = [str(x) for x in self.sparsities] header = ['layername'] + str_sparsities diff --git a/src/sdk/pynni/nni/analysis_utils/topology/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/__init__.py similarity index 100% rename from src/sdk/pynni/nni/analysis_utils/topology/__init__.py rename to src/sdk/pynni/nni/compression/analysis_utils/topology/__init__.py diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/__init__.py similarity index 100% rename from src/sdk/pynni/nni/analysis_utils/topology/torch/__init__.py rename to src/sdk/pynni/nni/compression/analysis_utils/topology/torch/__init__.py diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/graph_from_trace.py similarity index 92% rename from src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py rename to src/sdk/pynni/nni/compression/analysis_utils/topology/torch/graph_from_trace.py index be9c24e3a4..8100dfbe46 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/graph_from_trace.py +++ b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/graph_from_trace.py @@ -69,13 +69,13 @@ def __init__(self, model=None, data=None, graph=None): to make the graph more clear. Parameters ---------- - model: - The model to build the network architecture. - data: - The sample input data for the model. - graph: - Traced graph from jit.trace, if this option is set, - we donnot need to trace the model again. + model : torch.nn.Module + The model to build the network architecture. + data : torch.Tensor + The sample input data for the model. + graph : torch._C.Graph + Traced graph from jit.trace, if this option is set, + we donnot need to trace the model again. """ self.model = model self.data = data @@ -145,18 +145,18 @@ def visual_traverse(self, curnode, graph, lastnode, cfg): at the same time. Parameters ---------- - curnode: - Current visiting node(tensor/module). - graph: - The handle of the Dgraph. - lastnode: - The last visited node. - cfg: - Dict object to specify the rendering - configuration for operation node. - key is the name of the operation, - value is a also a dict. For example, - {'conv1': {'shape':'box', 'color':'red'}} + curnode : torch._C.Node + Current visiting node(tensor/module). + graph : graphviz.dot.Digraph + The handle of the Dgraph. + lastnode : torch._C.Node + The last visited node. + cfg : dict + Dict object to specify the rendering + configuration for operation node. + key is the name of the operation, + value is a also a dict. For example, + {'conv1': {'shape':'box', 'color':'red'}} """ if curnode in self.visited: if lastnode is not None: @@ -185,10 +185,10 @@ def base_visualization(self, filename, save_format='jpg', cfg=None): visualize the network architecture automaticlly. Parameters ---------- - filename: - The filename of the saved image file. - save_format: - The output save_format. + filename : str + The filename of the saved image file. + save_format : str + The output save_format. """ # TODO and detailed mode for the visualization function # in which the graph will also contain all the weights/bias diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/mask_conflict.py similarity index 85% rename from src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py rename to src/sdk/pynni/nni/compression/analysis_utils/topology/torch/mask_conflict.py index ee0ead97dc..98b798e761 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/mask_conflict.py +++ b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/mask_conflict.py @@ -15,15 +15,15 @@ def __init__(self, mask_file, model=None, dummy_input=None, graph=None): Parameters ---------- - model: - model to fix the mask conflict - dummy_input: - input example to trace the model - mask_file: - the path of the original mask file - graph: - the traced graph of the target model, is this parameter is not None, - we donnot use the model and dummpy_input to get the trace graph. + model : torch.nn.Module + model to fix the mask conflict + dummy_input : torch.Tensor + input example to trace the model + mask_file : str + the path of the original mask file + graph : torch._C.Graph + the traced graph of the target model, is this parameter is not None, + we donnot use the model and dummpy_input to get the trace graph. """ # check if the parameters are valid parameter_valid = False @@ -41,9 +41,9 @@ def __init__(self, mask_file, model=None, dummy_input=None, graph=None): def fix_mask_conflict(self): """ - Fix the mask conflict before the mask inference for the layers that - has shape dependencies. This function should be called before the - mask inference of the 'speedup' module. + Fix the mask conflict before the mask inference for the layers that + has shape dependencies. This function should be called before the + mask inference of the 'speedup' module. """ channel_depen = ChannelDependency(self.model, self.dummy_input, self.graph) depen_sets = channel_depen.dependency_sets diff --git a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py similarity index 81% rename from src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py rename to src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py index 92f52607c1..7d5e601718 100644 --- a/src/sdk/pynni/nni/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py @@ -22,13 +22,13 @@ def __init__(self, model=None, data=None, graph=None): Parameters ---------- - model: - The model to be analyzed. - data: - The example input data to trace the network architecture. - graph: - if we alreay has the traced graph of the target model, we donnot - need to trace the model again. + model : torch.nn.Module + The model to be analyzed. + data : torch.Tensor + The example input data to trace the network architecture. + graph : torch._C.Graph + if we alreay has the traced graph of the target model, we donnot + need to trace the model again. """ self.graph_builder = VisualGraph(model, data, graph) self.cnodes = list(self.graph_builder.graph.nodes()) @@ -45,13 +45,13 @@ def get_parent_convs(self, node): Parameters --------- - node: - target node. + node : torch._C.Node + target node. Returns ------- - parent_convs: - nearest father conv layers for the target worknode. + parent_convs: list + nearest father conv layers for the target worknode. """ parent_convs = [] queue = [] @@ -73,8 +73,8 @@ def get_parent_convs(self, node): def build_channel_dependency(self): """ - Build the channel dependency for the conv layers - in the model. + Build the channel dependency for the conv layers + in the model. """ for node in self.cnodes: parent_convs = [] @@ -102,19 +102,19 @@ def filter_prune_check(self, ratios): Parameters --------- - ratios: - the prune ratios for the layers. %ratios is a dict, - in which the keys are the names of the target layer - and the values are the prune ratio for the corresponding - layers. For example: - ratios = {'body.conv1': 0.5, 'body.conv2':0.5} - Note: the name of the layers should looks like - the names that model.named_modules() functions - returns. + ratios : dict + the prune ratios for the layers. %ratios is a dict, + in which the keys are the names of the target layer + and the values are the prune ratio for the corresponding + layers. For example: + ratios = {'body.conv1': 0.5, 'body.conv2':0.5} + Note: the name of the layers should looks like + the names that model.named_modules() functions + returns. Returns ------- - True/False + True/False """ for node in self.cnodes: @@ -161,9 +161,9 @@ def dependency_sets(self): Returns ------- - dependency_sets: - list of the dependency sets. For example, - [set(['conv1', 'conv2']), set(['conv3', 'conv4'])] + dependency_sets : list + list of the dependency sets. For example, + [set(['conv1', 'conv2']), set(['conv3', 'conv4'])] """ d_sets = [] From 0f70f67482e9a077089adffa04bb7d2317153b54 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 1 Jun 2020 03:17:41 +0000 Subject: [PATCH 24/47] update according to review comments. Signed-off-by: Ningxin --- .../sensitivity/torch/sensitivity_analysis.py | 12 +++++++++++- src/sdk/pynni/tests/test_analysis_utils.py | 6 +++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py index 6e70e4a97d..f9a1287b05 100644 --- a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -233,8 +233,18 @@ def visualization(self, outdir, merge=False): def export(self, filepath): """ Export the results of the sensitivity analysis - to a csv file. + to a csv file. The firstline of the csv file describe the content + structure. The first line is constructed by 'layername' and sparsity + list. Each line below records the accuracy of a layer under different + sparsities. Note that, due to the early_stop option, some layers may + not have all accuracies under different sparsities, because his accuracy + drop has alreay exceeded the threshold set by the user. The following is an + example output for export. + layername, 0.25, 0.5, 0.75 + conv1, 0.6, 0.55 + conv2, 0.61, 0.57, 0.56 + Parameters ---------- filepath : str diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py index ffb29c13ff..c2b0285530 100644 --- a/src/sdk/pynni/tests/test_analysis_utils.py +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -10,9 +10,9 @@ import numpy as np from nni.compression.torch import L1FilterPruner -from nni.analysis_utils.topology.torch.graph_from_trace import VisualGraph -from nni.analysis_utils.topology.torch.shape_dependency import ChannelDependency -from nni.analysis_utils.topology.torch.mask_conflict import MaskConflict +from nni.compression.analysis_utils.topology.torch.graph_from_trace import VisualGraph +from nni.compression.analysis_utils.topology.torch.shape_dependency import ChannelDependency +from nni.compression.analysis_utils.topology.torch.mask_conflict import MaskConflict device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') prefix = 'analysis_test' From 2eac2592d70456a507db1bcda9912e706f5bbee8 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 1 Jun 2020 06:56:32 +0000 Subject: [PATCH 25/47] Add docs for analysis_utils. Signed-off-by: Ningxin --- docs/en_US/Compressor/AnalysisUtils.md | 100 ++++++++++++++++++ .../sensitivity/torch/sensitivity_analysis.py | 15 ++- 2 files changed, 107 insertions(+), 8 deletions(-) create mode 100644 docs/en_US/Compressor/AnalysisUtils.md diff --git a/docs/en_US/Compressor/AnalysisUtils.md b/docs/en_US/Compressor/AnalysisUtils.md new file mode 100644 index 0000000000..a26e51bab0 --- /dev/null +++ b/docs/en_US/Compressor/AnalysisUtils.md @@ -0,0 +1,100 @@ +# Analysis Utils for Model Compression +We provide several easy-to-use tools for users to analyze their model during model compression. + +## Sensitivity +First, we provide a sensitivity analysis tool (**SensitivityAnalysis**) for users to analyze the sensitivity of each convolutional layer in their model. Specifically, the SensitiviyAnalysis gradually prune each layer of the model, and test the accuracy of the model at the same time. Note that, SensitivityAnalysis only prunes a layer once a time, and the other layers are set to their original weights. According to the accuracies of different convolutional layers under different sparsities, we can easily find out which layers the model accuracy is more sensitive to. + +### Usage + +Following codes show the basic usage of the SensitivityAnalysis. +```python +from nni.compression.analysis_utils.sensitivity.torch.sensitivity_analysis import SensitivityAnalysis + +def val(model): + model.eval() + total = 0 + correct = 0 + with torch.no_grad(): + for batchid, (data, label) in enumerate(val_loader): + data, label = data.cuda(), label.cuda() + out = model(data) + _, predicted = out.max(1) + total += data.size(0) + correct += predicted.eq(label).sum().item() + return correct / total + +s_analyzer = SensitivityAnalysis(model=net, val_func=val) +sensitivity = s_analyzer.analysis(val_args=[net]) +os.makedir(outdir) +s_analyzer.export(os.path.join(outdir, filename)) +s_analyzer.visualization(outdir, merge=True) +s_analyzer.visualization(outdir, merge=False) +``` + +Two key parameters of SensitivityAnalysis are model, and val_func. 'model' is the neural network that to be analyzed and the 'val_func' is the validation function that returns the model accuracy on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy on the dataset and pass it to SensitivityAnalysis. +SensitivityAnalysis can export the sensitivity results as a csv file and visualization the sensitivity results of each layer in the model, usage is shown in the example above. + +Futhermore, users can specify the sparsities values used to prune for each layer by optinal parameter 'sparsities'. +```python +s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75]) +``` +the SensitivityAnalysis will prune 25% 50% 75% weights gradually for each layer, and record the model's accuracy at the same time (SensitivityAnalysis only prune a layer once a time, the other layers are set to their original weights). If the sparsities is not set, SensitivityAnalysis will use the numpy.arange(0.1, 1.0, 0.1) as the default sparsity values. + +Users can also speed up the progress of sensitivity analysis by the early_stop option. By default, the SensitivityAnalysis will test the accuracy under all sparsities for each layer. In contrast, when the early_stop is set, the sensitivity analysis for a layer will stop, when the accuracy drop already reaches the threshold set by early_stop. +```python +s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop=0.1) +``` + +### Output example +The following lines are the example csv file exported from SensitivityAnalysis. The first line is constructed by 'layername' and sparsity list. Here the sparsity value means how much weight SensitivityAnalysis prune for each layer. Each line below records the model accuracy when this layer is under different sparsities. Note that, due to the early_stop option, some layers may +not have model accuracies under all sparsities, because its accuracy drop has alreay exceeded the threshold set by the user. +``` +layername,0.05,0.1,0.2,0.3,0.4,0.5,0.7,0.85,0.95 +features.0,0.54566,0.46308,0.06978,0.0374,0.03024,0.01512,0.00866,0.00492,0.00184 +features.3,0.54878,0.51184,0.37978,0.19814,0.07178,0.02114,0.00438,0.00442,0.00142 +features.6,0.55128,0.53566,0.4887,0.4167,0.31178,0.19152,0.08612,0.01258,0.00236 +features.8,0.55696,0.54194,0.48892,0.42986,0.33048,0.2266,0.09566,0.02348,0.0056 +features.10,0.55468,0.5394,0.49576,0.4291,0.3591,0.28138,0.14256,0.05446,0.01578 +``` + +## Topology +We also provide several tools for the topology analysis during the model compression. + +### ChannelDependency +Complicated models may has residual connection/concat operations in their models. When the user prune these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. If the layers has channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that run on the devices, because there will be a shape conflict when the model try to add/concat the outputs of these layers. This model is to find the layers that has channel count dependencies to help user better prune their model. + +#### Usage +```python +from nni.compression.analysis_utils.topology.torch.shape_dependency import ChannelDependency +data = torch.ones(1, 3, 224, 224).cuda() +channel_depen = ChannelDependency(net, data) +channel_depen.export('dependency.csv') +``` + +#### Output Example +Following lines are the output example of torchvision.models.resnet18 exported by ChannelDependency. The layers at the same line have output channel dependencies with each other. For example, layer1.1.conv2, conv1, and layer1.0.conv2 have output channel dependencies with each other, which means the output channel(filters) numbers of these three layers should be same with each other, otherwise the model may has shape conflict. +``` +Dependency Set,Convolutional Layers +Set 1,layer1.1.conv2,layer1.0.conv2,conv1 +Set 2,layer1.0.conv1 +Set 3,layer1.1.conv1 +Set 4,layer2.0.conv1 +Set 5,layer2.1.conv2,layer2.0.conv2,layer2.0.downsample.0 +Set 6,layer2.1.conv1 +Set 7,layer3.0.conv1 +Set 8,layer3.0.downsample.0,layer3.1.conv2,layer3.0.conv2 +Set 9,layer3.1.conv1 +Set 10,layer4.0.conv1 +Set 11,layer4.0.downsample.0,layer4.1.conv2,layer4.0.conv2 +Set 12,layer4.1.conv1 +``` + +### MaskConflict +When the masks of different layers in a model has conflict, we can fix the mask conflict by MaskConflict. Specifically, the MaskConflict loads the masks exported by the pruners(L1FilterPruner, etc), and check if there is mask conflict, if so, MaskConflict sets the conflicting masks to the same value. + +``` +from analysis_utils.topology.torch.mask_conflict import MaskConflict +mc = MaskConflict('./resnet18_mask', net, data) +mc.fix_mask_conflict() +mc.export('./resnet18_fixed_mask') +``` \ No newline at end of file diff --git a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py index f9a1287b05..ad969aa131 100644 --- a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py @@ -164,8 +164,8 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): if val_acc + self.early_stop < self.ori_acc: break - # reset the weights pruned by the pruner, because - # out sparsities is sorted, so we donnot need to reset + # reset the weights pruned by the pruner, because the + # input sparsities is sorted, so we donnot need to reset # weight of the layer when the sparsity changes, instead, # we only need reset the weight when the pruning layer changes. self.model.load_state_dict(self.ori_state_dict) @@ -235,16 +235,15 @@ def export(self, filepath): Export the results of the sensitivity analysis to a csv file. The firstline of the csv file describe the content structure. The first line is constructed by 'layername' and sparsity - list. Each line below records the accuracy of a layer under different - sparsities. Note that, due to the early_stop option, some layers may - not have all accuracies under different sparsities, because his accuracy - drop has alreay exceeded the threshold set by the user. The following is an - example output for export. + list. Each line below records the model accuracy when this layer is + under different sparsities. Note that, due to the early_stop option, + some layers may not have model accuracies under all sparsities, because + its accuracy drop has alreay exceeded the threshold set by the user. layername, 0.25, 0.5, 0.75 conv1, 0.6, 0.55 conv2, 0.61, 0.57, 0.56 - + Parameters ---------- filepath : str From 810f20e774c37281562b5a595e022b560608cc4e Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 1 Jun 2020 07:52:51 +0000 Subject: [PATCH 26/47] update rst --- docs/en_US/model_compression.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en_US/model_compression.rst b/docs/en_US/model_compression.rst index 457acfadce..1b6137841f 100644 --- a/docs/en_US/model_compression.rst +++ b/docs/en_US/model_compression.rst @@ -22,3 +22,4 @@ For details, please refer to the following tutorials: Model Speedup Automatic Model Compression Implementation + AnalysisUtils From 3b9f4df63dbcd47ec63daf631e482cfec541a0ad Mon Sep 17 00:00:00 2001 From: Ningxin Date: Wed, 10 Jun 2020 01:35:47 +0000 Subject: [PATCH 27/47] Use TorchModuleGraph to analyze the shape dependency. Signed-off-by: Ningxin --- .../topology/torch/shape_dependency.py | 98 ++++++++++--------- 1 file changed, 54 insertions(+), 44 deletions(-) diff --git a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py index 7d5e601718..3564866277 100644 --- a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py +++ b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py @@ -2,11 +2,11 @@ # Licensed under the MIT license. import csv +import torch import logging - -from .graph_from_trace import VisualGraph - +import nni._graph_utils as _graph_utils +from _graph_utils import TorchModuleGraph CONV_TYPE = 'aten::_convolution' ADD_TYPES = ['aten::add', 'aten::add_'] @@ -15,7 +15,7 @@ class ChannelDependency: - def __init__(self, model=None, data=None, graph=None): + def __init__(self, model=None, dummy_input=None, traced_model=None): """ This model analyze the channel dependencis between the conv layers in a model. @@ -26,17 +26,17 @@ def __init__(self, model=None, data=None, graph=None): The model to be analyzed. data : torch.Tensor The example input data to trace the network architecture. - graph : torch._C.Graph + traced_model : torch._C.Graph if we alreay has the traced graph of the target model, we donnot need to trace the model again. """ - self.graph_builder = VisualGraph(model, data, graph) - self.cnodes = list(self.graph_builder.graph.nodes()) - self.graph = self.graph_builder.graph - self.forward_edge = self.graph_builder.forward_edge - self.c2py = self.graph_builder.c2py - self.dependency = {} + # check if the input is legal + if traced_model is None: + # user should provide model & dummy_input to trace the model or a already traced model + assert model is not None and dummy_input is not None + self.graph = TorchModuleGraph(model, dummy_input, traced_model) self.build_channel_dependency() + self.dependency = {} def get_parent_convs(self, node): @@ -58,17 +58,14 @@ def get_parent_convs(self, node): queue.append(node) while queue: curnode = queue.pop(0) - if curnode in self.c2py and self.c2py[curnode].isOp \ - and curnode.kind() == CONV_TYPE: + if node.op_type == 'Conv2d': # find the first met conv - parent_convs.append(curnode) + parent_convs.append(curnode.name) continue - parents = self.c2py[curnode].parents() + parents = self.graph.find_predecessors(curnode.unique_name) + parents = [self.graph.name_to_node[name] for name in parents] for parent in parents: - if parent in self.c2py and (self.c2py[parent].isOp or 'Tensor' in str(parent.type())): - # filter the scalar parameters of the functions - # only consider the Tensors/ List(Tensor) - queue.append(parent) + queue.append(parent) return parent_convs def build_channel_dependency(self): @@ -76,13 +73,25 @@ def build_channel_dependency(self): Build the channel dependency for the conv layers in the model. """ - for node in self.cnodes: + for node in self.graph.nodes_py.nodes_op: parent_convs = [] - if node.kind() in ADD_TYPES: + # find the node that contains aten::add + # or aten::cat operations + if node.op_type in ADD_TYPES: parent_convs = self.get_parent_convs(node) - if node.kind() == CAT_TYPE: - cat_dim = list(node.inputs())[1].toIValue() - # N * C * H * W + elif node.op_type == CAT_TYPE: + # To determine if this cat operation will introduce channel + # dependency, we need the specific input parameters of the cat + # opertion. To get the input parameters of the cat opertion, we + # need to traverse all the cpp_nodes included by this NodePyGroup, + # because, TorchModuleGraph merges the important nodes and the adjacent + # unimportant nodes (nodes started with prim::attr, for example) into a + # NodepyGroup. + cat_dim = None + for cnode in node.node_cpps: + if cnode.kind == CAT_TYPE: + cat_dim = list(cnode.inputs())[1].toIValue() + break if cat_dim != 1: parent_convs = self.get_parent_convs(node) dependency_set = set(parent_convs) @@ -94,6 +103,7 @@ def build_channel_dependency(self): for _node in dependency_set: self.dependency[_node] = dependency_set + def filter_prune_check(self, ratios): """ According to the channel dependencies between the conv @@ -116,20 +126,20 @@ def filter_prune_check(self, ratios): ------- True/False """ - - for node in self.cnodes: - if node.kind() == CONV_TYPE and self.c2py[node].name in ratios: - if node not in self.dependency: + for node in self.graph.nodes_py.nodes_op: + if node.op_type == 'Conv2d' and node.name in ratios: + if node.name not in self.dependency: # this layer has no dependency on other layers # it's legal to set any prune ratio between 0 and 1 continue - for other in self.dependency[node]: - if self.c2py[other].name not in ratios: + for other in self.dependency[node.name]: + if other not in ratios: return False - elif ratios[self.c2py[node].name] != ratios[self.c2py[other].name]: + elif ratios[other] != ratios[node.name]: return False return True + def export(self, filepath): """ export the channel dependencies as a csv file. @@ -140,18 +150,18 @@ def export(self, filepath): with open(filepath, 'w') as csvf: csv_w = csv.writer(csvf, delimiter=',') csv_w.writerow(header) - for node in self.cnodes: - if node.kind() != CONV_TYPE or node in visited: + for node in self.graph.nodes_py.nodes_op: + if node.op_type() != 'Conv2d' or node in visited: continue setid += 1 row = ['Set %d' % setid] - if node not in self.dependency: + if node.name not in self.dependency: visited.add(node) - row.append(self.c2py[node].name) + row.append(node.name) else: for other in self.dependency[node]: - visited.add(other) - row.append(self.c2py[other].name) + visited.add(self.graph.name_to_node[other]) + row.append(other) csv_w.writerow(row) @property @@ -168,16 +178,16 @@ def dependency_sets(self): """ d_sets = [] visited = set() - for node in self.cnodes: - if node.kind() != CONV_TYPE or node in visited: + for node in self.graph.nodes_py.nodes_op: + if node.op_type != 'Conv2d' or node in visited: continue tmp_set = set() - if node not in self.dependency: + if node.name not in self.dependency: visited.add(node) - tmp_set.add(self.c2py[node].name) + tmp_set.add(node.name) else: - for other in self.dependency[node]: - visited.add(other) - tmp_set.add(self.c2py[other].name) + for other in self.dependency[node.name]: + visited.add(self.graph.name_to_node[other]) + tmp_set.add(other) d_sets.append(tmp_set) return d_sets From a214bb8c0eaec3c2ebf07c52d6b72c962f7bbc8e Mon Sep 17 00:00:00 2001 From: Ningxin Date: Wed, 10 Jun 2020 02:53:33 +0000 Subject: [PATCH 28/47] refactor the compression utils. Signed-off-by: Ningxin --- .../compression/analysis_utils/__init__.py | 2 - .../analysis_utils/sensitivity/__init__.py | 2 - .../sensitivity/torch/__init__.py | 4 - .../analysis_utils/topology/__init__.py | 2 - .../analysis_utils/topology/torch/__init__.py | 2 - .../topology/torch/graph_from_trace.py | 321 ------------------ .../torch => torch/utils}/mask_conflict.py | 0 .../utils}/sensitivity_analysis.py | 0 .../torch => torch/utils}/shape_dependency.py | 0 9 files changed, 333 deletions(-) delete mode 100644 src/sdk/pynni/nni/compression/analysis_utils/__init__.py delete mode 100644 src/sdk/pynni/nni/compression/analysis_utils/sensitivity/__init__.py delete mode 100644 src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/__init__.py delete mode 100644 src/sdk/pynni/nni/compression/analysis_utils/topology/__init__.py delete mode 100644 src/sdk/pynni/nni/compression/analysis_utils/topology/torch/__init__.py delete mode 100644 src/sdk/pynni/nni/compression/analysis_utils/topology/torch/graph_from_trace.py rename src/sdk/pynni/nni/compression/{analysis_utils/topology/torch => torch/utils}/mask_conflict.py (100%) rename src/sdk/pynni/nni/compression/{analysis_utils/sensitivity/torch => torch/utils}/sensitivity_analysis.py (100%) rename src/sdk/pynni/nni/compression/{analysis_utils/topology/torch => torch/utils}/shape_dependency.py (100%) diff --git a/src/sdk/pynni/nni/compression/analysis_utils/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/__init__.py deleted file mode 100644 index 0eca6426d9..0000000000 --- a/src/sdk/pynni/nni/compression/analysis_utils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. \ No newline at end of file diff --git a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/__init__.py deleted file mode 100644 index 9a0454564d..0000000000 --- a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. diff --git a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/__init__.py deleted file mode 100644 index f8093828b6..0000000000 --- a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -from .sensitivity_analysis import SensitivityAnalysis diff --git a/src/sdk/pynni/nni/compression/analysis_utils/topology/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/__init__.py deleted file mode 100644 index 0eca6426d9..0000000000 --- a/src/sdk/pynni/nni/compression/analysis_utils/topology/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. \ No newline at end of file diff --git a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/__init__.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/__init__.py deleted file mode 100644 index 0eca6426d9..0000000000 --- a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. \ No newline at end of file diff --git a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/graph_from_trace.py b/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/graph_from_trace.py deleted file mode 100644 index 8100dfbe46..0000000000 --- a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/graph_from_trace.py +++ /dev/null @@ -1,321 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -import re -import os -import csv -import logging -import torch -import torch.jit as jit -import graphviz - - -__all__ = ["VisualGraph"] - -TUPLE_UNPACK = 'prim::TupleUnpack' -CLASSTYPE_KIND = 'ClassType' -logger = logging.getLogger('Graph_From_Trace') - - -class PyNode: - def __init__(self, cnode, isValue=False): - self.cnode = cnode - self.isValue = isValue - self.isTensor = False - self.isOp = not self.isValue - if self.isValue: - if isinstance(self.cnode.type(), torch._C.TensorType): - self.isTensor = True - self.shape = self.cnode.type().sizes() - if self.isOp: - scopename = cnode.scopeName() - if torch.__version__ >= '1.4.0': - # note, the scopeName of node may be empty - scopename = re.split('/', scopename) - self.name = scopename[-1] if scopename else '' - else: - self.name = '.'.join(re.findall(r'\[(.*?)\]', scopename)) - # remove the __module prefix - if self.name.startswith('__module.'): - self.name = self.name[len('__module.'):] - - def __str__(self): - if self.isTensor: - _str = 'Tensor: {}'.format(self.shape) - elif self.isOp: - op_type = self.cnode.kind() - op_type = re.split('::', op_type)[1] - _str = self.name + '\nType: ' + op_type - else: - _str = str(self.cnode.type()) - return _str - - def parents(self): - if self.isOp: - return list(self.cnode.inputs()) - else: - return [self.cnode.node()] - - -class VisualGraph: - def __init__(self, model=None, data=None, graph=None): - """ - We build the network architecture graph according the graph - in the scriptmodule. However, the original graph from jit.trace - has lots of detailed information which make the graph complicated - and hard to understand. So we also store a copy of the network - architecture in the self.forward_edge. We will simplify the network - architecure (such as unpack_tuple, etc) stored in self.forward_edge - to make the graph more clear. - Parameters - ---------- - model : torch.nn.Module - The model to build the network architecture. - data : torch.Tensor - The sample input data for the model. - graph : torch._C.Graph - Traced graph from jit.trace, if this option is set, - we donnot need to trace the model again. - """ - self.model = model - self.data = data - if graph is not None: - self.graph = graph - elif (model is not None) and (data is not None): - with torch.onnx.set_training(model, False): - self.traced_model = jit.trace(model, data) - self.graph = self.traced_model.graph - torch._C._jit_pass_inline(self.graph) - else: - raise Exception('Input parameters invalid!') - self.forward_edge = {} - self.c2py = {} - self.visited = set() - self.build_graph() - self.unpack_tuple() - - def unpack_tuple(self): - """ - jit.trace also traces the tuple creation and unpack, which makes - the grapgh complex and difficult to understand. Therefore, we - unpack the tuple handly to make the graph clear. - """ - parent_node = None - for node in self.graph.nodes(): - if node.kind() == TUPLE_UNPACK: - in_tuple = list(node.inputs())[0] - parent_node = in_tuple.node() - in_tensors = list(parent_node.inputs()) - out_tensors = list(node.outputs()) - assert len(in_tensors) == len(out_tensors) - for i, _ in enumerate(in_tensors): - ori_edges = self.forward_edge[in_tensors[i]] - # remove the out edge to the Tuple_construct OP node - self.forward_edge[in_tensors[i]] = list( - filter(lambda x: x != parent_node, ori_edges)) - # Directly connect to the output nodes of the out_tensors - self.forward_edge[in_tensors[i]].extend( - self.forward_edge[out_tensors[i]]) - - def build_graph(self): - """ - Copy the architecture information from the traced_model into - forward_edge. - """ - for node in self.graph.nodes(): - self.c2py[node] = PyNode(node) - for _input in node.inputs(): - if _input not in self.c2py: - self.c2py[_input] = PyNode(_input, True) - if _input in self.forward_edge: - self.forward_edge[_input].append(node) - else: - self.forward_edge[_input] = [node] - for output in node.outputs(): - if output not in self.c2py: - self.c2py[output] = PyNode(output, True) - if node in self.forward_edge: - self.forward_edge[node].append(output) - else: - self.forward_edge[node] = [output] - - def visual_traverse(self, curnode, graph, lastnode, cfg): - """" - Traverse the network and draw the nodes and edges - at the same time. - Parameters - ---------- - curnode : torch._C.Node - Current visiting node(tensor/module). - graph : graphviz.dot.Digraph - The handle of the Dgraph. - lastnode : torch._C.Node - The last visited node. - cfg : dict - Dict object to specify the rendering - configuration for operation node. - key is the name of the operation, - value is a also a dict. For example, - {'conv1': {'shape':'box', 'color':'red'}} - """ - if curnode in self.visited: - if lastnode is not None: - graph.edge(str(id(lastnode)), str(id(curnode))) - return - self.visited.add(curnode) - tmp_str = str(self.c2py[curnode]) - if self.c2py[curnode].isOp: - name = self.c2py[curnode].name - # default render configuration - render_cfg = {'shape': 'ellipse', 'style': 'solid'} - if name in cfg: - render_cfg = cfg[name] - graph.node(str(id(curnode)), tmp_str, **render_cfg) - else: - graph.node(str(id(curnode)), tmp_str, shape='box', - color='lightblue', style='dashed') - if lastnode is not None: - graph.edge(str(id(lastnode)), str(id(curnode))) - if curnode in self.forward_edge: - for _next in self.forward_edge[curnode]: - self.visual_traverse(_next, graph, curnode, cfg) - - def base_visualization(self, filename, save_format='jpg', cfg=None): - """ - visualize the network architecture automaticlly. - Parameters - ---------- - filename : str - The filename of the saved image file. - save_format : str - The output save_format. - """ - # TODO and detailed mode for the visualization function - # in which the graph will also contain all the weights/bias - # information. - if not cfg: - cfg = {} - graph = graphviz.Digraph(format=save_format) - self.visited.clear() - for _input in self.graph.inputs(): - if _input.type().kind() == CLASSTYPE_KIND: - continue - self.visual_traverse(_input, graph, None, cfg) - graph.render(filename) - - def visualize_with_flops(self, filepath, save_format, flops_file): - assert os.path.exists(flops_file) - f_handle = open(flops_file, 'r') - csv_r = csv.reader(f_handle) - flops = {} - # skip the header of the csv file - _ = next(csv_r) - for row in csv_r: - if len(row) == 2: - layername = row[0] - _flops = float(row[1]) - flops[layername] = _flops - - f_handle.close() - # Divide the flops of the layers into 11 levels - # We use the 'rdylgn11 color scheme' to present - # the number of the flops, in which we have 11 colors - # range from green to red. - _min_flops = min(flops.values()) - _max_flops = max(flops.values()) - color_scheme_count = 9 - flops_step = (_max_flops - _min_flops) / (color_scheme_count-1) - - cfgs = {} - for layername in flops: - flops_level = (flops[layername] - _min_flops) / flops_step - # flops_level = color_scheme_count - int(round(flops_level)) - flops_level = int(round(flops_level)) + 1 - render_cfg = render_cfg = {'shape': 'ellipse', - 'fillcolor': "/reds9/"+str(flops_level), 'style': 'filled'} - cfgs[layername] = render_cfg - self.base_visualization(filepath, save_format=save_format, cfg=cfgs) - - def visualize_with_dependency(self, filepath, save_format, dependency_file): - assert os.path.exists(dependency_file) - f_handle = open(dependency_file, 'r') - csv_r = csv.reader(f_handle) - # skip the header of the csv file - _ = next(csv_r) - dependency_sets = [] - for row in csv_r: - tmp_set = set() - for i in range(1, len(row)): - tmp_set.add(row[i]) - dependency_sets.append(tmp_set) - f_handle.close() - # Create the render configs, assign the same color for the - # same dependency set - cfgs = {} - colorid = 0 - for tmp_set in dependency_sets: - if len(tmp_set) == 1: - # This layer has no dependency - continue - colorid = (colorid + 1) % 12 - str_color = "/paired12/%d" % (colorid + 1) - for layername in tmp_set: - render_cfg = {'shape': 'ellipse', - 'fillcolor': str_color, 'style': 'filled'} - cfgs[layername] = render_cfg - self.base_visualization(filepath, save_format=save_format, cfg=cfgs) - - def visualize_with_sensitivity(self, filepath, save_format, sensitivity_file): - assert os.path.exists(sensitivity_file) - f_handle = open(sensitivity_file, 'r') - csv_r = csv.reader(f_handle) - header = next(csv_r) - # sparsities is ordered in sensitivity analysis - sparsities = [float(x) for x in header[1:]] - sensitivity = {} - for row in csv_r: - layername = row[0] - accs = [float(_acc) for _acc in row[1:]] - sensitivity[layername] = accs - f_handle.close() - # Note: Due to the early stop in SensitivityAnalysis, the number of - # accuracies of different sparsities may be different. The earlier - # the layers stops, the higher the sensitivity is. - cfgs = {} - color_scheme_count = 9 - for layername in sensitivity: - _max = sparsities[len(sensitivity[layername]) - 1] - _max_all = max(sparsities) - level = 1.0 - (_max / _max_all) # [0, 1] - level = int(color_scheme_count * level) # [0, 9] - # color number start from 1 - if level == 0: - level = 1 - str_color = "/reds9/%d" % level - render_cfg = {'shape': 'ellipse', - 'fillcolor': str_color, 'style': 'filled'} - cfgs[layername] = render_cfg - self.base_visualization(filepath, save_format=save_format, cfg=cfgs) - - def visualization(self, filename, save_format='jpg', - flops_file=None, - sensitivity_file=None, - dependency_file=None): - - # First, visualize the network architecture only - self.base_visualization(filename, save_format=save_format) - # if the flops file is specified, we also render - # a image with the flops information. - if flops_file is not None: - flops_img = filename + '_flops' - self.visualize_with_flops(flops_img, save_format, flops_file) - - if dependency_file is not None: - dependency_img = filename + '_dependency' - self.visualize_with_dependency( - dependency_img, save_format, dependency_file) - - if sensitivity_file is not None: - sensitivity_img = filename + '_sensitivity' - self.visualize_with_sensitivity( - sensitivity_img, save_format, sensitivity_file) diff --git a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/mask_conflict.py b/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py similarity index 100% rename from src/sdk/pynni/nni/compression/analysis_utils/topology/torch/mask_conflict.py rename to src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py diff --git a/src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py similarity index 100% rename from src/sdk/pynni/nni/compression/analysis_utils/sensitivity/torch/sensitivity_analysis.py rename to src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py diff --git a/src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py similarity index 100% rename from src/sdk/pynni/nni/compression/analysis_utils/topology/torch/shape_dependency.py rename to src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py From 6d1a546b8880edcec33dda42067f1466a0b6a958 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Wed, 10 Jun 2020 03:55:41 +0000 Subject: [PATCH 29/47] Update the corresponding unit test. Signed-off-by: Ningxin --- .../compression/torch/utils/shape_dependency.py | 15 +++++++-------- src/sdk/pynni/tests/test_analysis_utils.py | 5 ++--- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py index 3564866277..3bfc66a4f4 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py +++ b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py @@ -5,8 +5,8 @@ import torch import logging -import nni._graph_utils as _graph_utils -from _graph_utils import TorchModuleGraph +from nni._graph_utils import TorchModuleGraph + CONV_TYPE = 'aten::_convolution' ADD_TYPES = ['aten::add', 'aten::add_'] @@ -35,9 +35,8 @@ def __init__(self, model=None, dummy_input=None, traced_model=None): # user should provide model & dummy_input to trace the model or a already traced model assert model is not None and dummy_input is not None self.graph = TorchModuleGraph(model, dummy_input, traced_model) + self.dependency = dict() self.build_channel_dependency() - self.dependency = {} - def get_parent_convs(self, node): """ @@ -58,7 +57,7 @@ def get_parent_convs(self, node): queue.append(node) while queue: curnode = queue.pop(0) - if node.op_type == 'Conv2d': + if curnode.op_type == 'Conv2d': # find the first met conv parent_convs.append(curnode.name) continue @@ -89,7 +88,7 @@ def build_channel_dependency(self): # NodepyGroup. cat_dim = None for cnode in node.node_cpps: - if cnode.kind == CAT_TYPE: + if cnode.kind() == CAT_TYPE: cat_dim = list(cnode.inputs())[1].toIValue() break if cat_dim != 1: @@ -151,7 +150,7 @@ def export(self, filepath): csv_w = csv.writer(csvf, delimiter=',') csv_w.writerow(header) for node in self.graph.nodes_py.nodes_op: - if node.op_type() != 'Conv2d' or node in visited: + if node.op_type != 'Conv2d' or node in visited: continue setid += 1 row = ['Set %d' % setid] @@ -159,7 +158,7 @@ def export(self, filepath): visited.add(node) row.append(node.name) else: - for other in self.dependency[node]: + for other in self.dependency[node.name]: visited.add(self.graph.name_to_node[other]) row.append(other) csv_w.writerow(row) diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_analysis_utils.py index c2b0285530..7f6d56657d 100644 --- a/src/sdk/pynni/tests/test_analysis_utils.py +++ b/src/sdk/pynni/tests/test_analysis_utils.py @@ -10,9 +10,8 @@ import numpy as np from nni.compression.torch import L1FilterPruner -from nni.compression.analysis_utils.topology.torch.graph_from_trace import VisualGraph -from nni.compression.analysis_utils.topology.torch.shape_dependency import ChannelDependency -from nni.compression.analysis_utils.topology.torch.mask_conflict import MaskConflict +from nni.compression.torch.utils.shape_dependency import ChannelDependency +from nni.compression.torch.utils.mask_conflict import MaskConflict device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') prefix = 'analysis_test' From 3aeb8a23eae190b7a585995e9a679a079aadba08 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Wed, 10 Jun 2020 05:17:17 +0000 Subject: [PATCH 30/47] Remove the visualization modules and related dependencies. Signed-off-by: Ningxin --- deployment/pypi/setup.py | 4 +- setup.py | 4 +- .../torch/utils/sensitivity_analysis.py | 62 ------------------- src/sdk/pynni/requirements.txt | 4 -- src/sdk/pynni/setup.py | 4 +- 5 files changed, 3 insertions(+), 75 deletions(-) diff --git a/deployment/pypi/setup.py b/deployment/pypi/setup.py index 356612db62..3c2d433790 100644 --- a/deployment/pypi/setup.py +++ b/deployment/pypi/setup.py @@ -62,9 +62,7 @@ 'scipy', 'coverage', 'colorama', - 'scikit-learn>=0.20,<0.22', - 'graphviz', - 'matplotlib' + 'scikit-learn>=0.20,<0.22' ], classifiers = [ 'Programming Language :: Python :: 3', diff --git a/setup.py b/setup.py index 1ddd057873..8a3733776f 100644 --- a/setup.py +++ b/setup.py @@ -40,9 +40,7 @@ def read(fname): 'schema', 'PythonWebHDFS', 'colorama', - 'scikit-learn>=0.20,<0.22', - 'graphviz', - 'matplotlib' + 'scikit-learn>=0.20,<0.22' ], entry_points = { diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index ad969aa131..650b964773 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -6,9 +6,6 @@ import csv import logging from collections import OrderedDict -import matplotlib -import matplotlib.pyplot as plt -from matplotlib.lines import Line2D import numpy as np import torch.nn as nn @@ -17,8 +14,6 @@ from nni.compression.torch import L1FilterPruner from nni.compression.torch import L2FilterPruner -# use Agg backend -matplotlib.use('Agg') SUPPORTED_OP_NAME = ['Conv2d', 'Conv1d'] SUPPORTED_OP_TYPE = [getattr(nn, name) for name in SUPPORTED_OP_NAME] @@ -172,63 +167,6 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): return self.sensitivities - def visualization(self, outdir, merge=False): - """ - Visualize the sensitivity curves of the model - - Parameters - ---------- - outdir : str - output directory of the image - merge : bool - if merge all the sensitivity curves into a - single image. If not, we will draw a picture - for each target layer of the model. - """ - os.makedirs(outdir, exist_ok=True) - LineStyles = [':', '-.', '--', '-'] - Markers = list(Line2D.markers.keys()) - if not merge: - # Draw the sensitivity curves for each layer first - for name in self.sensitivities: - X = list(self.sensitivities[name].keys()) - X = sorted(X) - Y = [self.sensitivities[name][x] for x in X] - if 0.00 not in X: - # add the original accuracy into the figure - X = [0.00] + X - Y = [self.ori_acc] + Y - plt.figure(figsize=(8, 4)) - plt.plot(X, Y, marker='*') - plt.xlabel('Prune Ratio') - plt.ylabel('Validation Accuracy') - plt.title(name) - plt.tight_layout() - filepath = os.path.join(outdir, '%s.jpg' % name) - plt.savefig(filepath) - plt.close() - else: - plt.figure() - styleid = 0 - for name in self.sensitivities: - X = list(self.sensitivities[name].keys()) - X = sorted(X) - Y = [self.sensitivities[name][x] for x in X] - if 0.00 not in X: - # add the original accuracy into the figure - X = [0.00] + X - Y = [self.ori_acc] + Y - linestyle = LineStyles[styleid % len(LineStyles)] - marker = Markers[styleid % len(Markers)] - plt.plot(X, Y, label=name, linestyle=linestyle, marker=marker) - plt.xlabel('Prune Ratio') - plt.ylabel('Validation Accuracy') - plt.legend(loc='center left', bbox_to_anchor=(1.02, 0.5)) - plt.tight_layout() - filepath = os.path.join(outdir, 'all.jpg') - plt.savefig(filepath, dpi=1000, bbox_inches='tight') - styleid += 1 - plt.close() def export(self, filepath): """ diff --git a/src/sdk/pynni/requirements.txt b/src/sdk/pynni/requirements.txt index b7d9b0cf90..885c909ca8 100644 --- a/src/sdk/pynni/requirements.txt +++ b/src/sdk/pynni/requirements.txt @@ -8,7 +8,3 @@ hyperopt==0.1.2 # metis tuner scikit-learn==0.20 - -# analysis_utils -graphviz -matplotlib diff --git a/src/sdk/pynni/setup.py b/src/sdk/pynni/setup.py index 41a603023b..4e006a9014 100644 --- a/src/sdk/pynni/setup.py +++ b/src/sdk/pynni/setup.py @@ -18,9 +18,7 @@ def read(fname): 'json_tricks', 'numpy', 'scipy', - 'coverage', - 'graphviz', - 'matplotlib' + 'coverage' ], package_data = {'nni': ['**/requirements.txt']}, From bf72f3d22ba7ca401f1cd4e380cc128872237167 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Wed, 10 Jun 2020 07:10:23 +0000 Subject: [PATCH 31/47] update --- .../pynni/nni/compression/torch/utils/sensitivity_analysis.py | 1 - src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py | 1 - 2 files changed, 2 deletions(-) diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index 650b964773..722d91ec53 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. -import os import copy import csv import logging diff --git a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py index 3bfc66a4f4..b3c348ea91 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py +++ b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py @@ -2,7 +2,6 @@ # Licensed under the MIT license. import csv -import torch import logging from nni._graph_utils import TorchModuleGraph From caced256760b870a98a44fab78c5f5c42867e662 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Thu, 11 Jun 2020 01:12:50 +0000 Subject: [PATCH 32/47] Update the docs. Signed-off-by: Ningxin --- .../en_US/Compressor/{AnalysisUtils.md => CompressionUtils.md} | 0 docs/en_US/model_compression.rst | 2 +- docs/en_US/sdk_reference.rst | 3 ++- 3 files changed, 3 insertions(+), 2 deletions(-) rename docs/en_US/Compressor/{AnalysisUtils.md => CompressionUtils.md} (100%) diff --git a/docs/en_US/Compressor/AnalysisUtils.md b/docs/en_US/Compressor/CompressionUtils.md similarity index 100% rename from docs/en_US/Compressor/AnalysisUtils.md rename to docs/en_US/Compressor/CompressionUtils.md diff --git a/docs/en_US/model_compression.rst b/docs/en_US/model_compression.rst index 1b6137841f..e76d18a613 100644 --- a/docs/en_US/model_compression.rst +++ b/docs/en_US/model_compression.rst @@ -22,4 +22,4 @@ For details, please refer to the following tutorials: Model Speedup Automatic Model Compression Implementation - AnalysisUtils + CompressionUtils diff --git a/docs/en_US/sdk_reference.rst b/docs/en_US/sdk_reference.rst index 49d47a2ffd..08c5753381 100644 --- a/docs/en_US/sdk_reference.rst +++ b/docs/en_US/sdk_reference.rst @@ -7,4 +7,5 @@ Python API Reference :maxdepth: 1 Auto Tune - NAS \ No newline at end of file + NAS + CompressionUtils \ No newline at end of file From c0e93e58317cf60880ac4c368d77f39cf47c63f4 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 12 Jun 2020 00:05:15 +0000 Subject: [PATCH 33/47] update docs. Signed-off-by: Ningxin --- docs/en_US/Compressor/CompressionReference.md | 23 +++++++++++++++++++ docs/en_US/model_compression.rst | 2 +- docs/en_US/sdk_reference.rst | 2 +- 3 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 docs/en_US/Compressor/CompressionReference.md diff --git a/docs/en_US/Compressor/CompressionReference.md b/docs/en_US/Compressor/CompressionReference.md new file mode 100644 index 0000000000..f24003e625 --- /dev/null +++ b/docs/en_US/Compressor/CompressionReference.md @@ -0,0 +1,23 @@ +# Python API Reference of Compression Utilities + +```eval_rst +.. contents:: +``` + +## Sensitivity Utilities + +```eval_rst +.. autoclass:: nni.compression.torch.utils.sensitivity_analysis + :members: + +``` + +## Topology Utilities + +```eval_rst +.. autoclass:: nni.compression.torch.utils.shape_dependency + :members: + +.. autoclass:: nni.compression.torch.utils.mask_conflict.py + :members: +``` diff --git a/docs/en_US/model_compression.rst b/docs/en_US/model_compression.rst index e76d18a613..f6821e3045 100644 --- a/docs/en_US/model_compression.rst +++ b/docs/en_US/model_compression.rst @@ -22,4 +22,4 @@ For details, please refer to the following tutorials: Model Speedup Automatic Model Compression Implementation - CompressionUtils + Compression Utilities diff --git a/docs/en_US/sdk_reference.rst b/docs/en_US/sdk_reference.rst index 08c5753381..2602e257b9 100644 --- a/docs/en_US/sdk_reference.rst +++ b/docs/en_US/sdk_reference.rst @@ -8,4 +8,4 @@ Python API Reference Auto Tune NAS - CompressionUtils \ No newline at end of file + Compression Utilities \ No newline at end of file From 6d7ea88b3209146123527336d7a35ad20da07447 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 12 Jun 2020 02:23:07 +0000 Subject: [PATCH 34/47] update docs. Signed-off-by: Ningxin --- docs/en_US/Compressor/CompressionReference.md | 6 +++--- .../nni/compression/torch/utils/sensitivity_analysis.py | 2 -- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/en_US/Compressor/CompressionReference.md b/docs/en_US/Compressor/CompressionReference.md index f24003e625..c190a46eb6 100644 --- a/docs/en_US/Compressor/CompressionReference.md +++ b/docs/en_US/Compressor/CompressionReference.md @@ -7,7 +7,7 @@ ## Sensitivity Utilities ```eval_rst -.. autoclass:: nni.compression.torch.utils.sensitivity_analysis +.. autoclass:: nni.compression.torch.utils.sensitivity_analysis.SensitivityAnalysis :members: ``` @@ -15,9 +15,9 @@ ## Topology Utilities ```eval_rst -.. autoclass:: nni.compression.torch.utils.shape_dependency +.. autoclass:: nni.compression.torch.utils.shape_dependency.ChannelDependency :members: -.. autoclass:: nni.compression.torch.utils.mask_conflict.py +.. autoclass:: nni.compression.torch.utils.mask_conflict.MaskConflict :members: ``` diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index 722d91ec53..5fa836ba6b 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -108,8 +108,6 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): args for the val_function val_kwargs : dict kwargs for the val_funtion - The val_funtion will be called as: - val_function(*val_args, **val_kwargs). start : int Layer index of the sensitivity analysis start. end : int From e7790a2c9bd7f725c9da40a51e1d05ff29750288 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Fri, 12 Jun 2020 03:32:08 +0000 Subject: [PATCH 35/47] update docs --- docs/en_US/Compressor/CompressionUtils.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index a26e51bab0..044871f41c 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -8,7 +8,7 @@ First, we provide a sensitivity analysis tool (**SensitivityAnalysis**) for user Following codes show the basic usage of the SensitivityAnalysis. ```python -from nni.compression.analysis_utils.sensitivity.torch.sensitivity_analysis import SensitivityAnalysis +from nni.compression.torch.utils.sensitivity_analysis import SensitivityAnalysis def val(model): model.eval() @@ -27,12 +27,10 @@ s_analyzer = SensitivityAnalysis(model=net, val_func=val) sensitivity = s_analyzer.analysis(val_args=[net]) os.makedir(outdir) s_analyzer.export(os.path.join(outdir, filename)) -s_analyzer.visualization(outdir, merge=True) -s_analyzer.visualization(outdir, merge=False) ``` Two key parameters of SensitivityAnalysis are model, and val_func. 'model' is the neural network that to be analyzed and the 'val_func' is the validation function that returns the model accuracy on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy on the dataset and pass it to SensitivityAnalysis. -SensitivityAnalysis can export the sensitivity results as a csv file and visualization the sensitivity results of each layer in the model, usage is shown in the example above. +SensitivityAnalysis can export the sensitivity results as a csv file usage is shown in the example above. Futhermore, users can specify the sparsities values used to prune for each layer by optinal parameter 'sparsities'. ```python @@ -65,7 +63,7 @@ Complicated models may has residual connection/concat operations in their models #### Usage ```python -from nni.compression.analysis_utils.topology.torch.shape_dependency import ChannelDependency +from nni.compression.torch.utils.shape_dependency import ChannelDependency data = torch.ones(1, 3, 224, 224).cuda() channel_depen = ChannelDependency(net, data) channel_depen.export('dependency.csv') @@ -93,7 +91,7 @@ Set 12,layer4.1.conv1 When the masks of different layers in a model has conflict, we can fix the mask conflict by MaskConflict. Specifically, the MaskConflict loads the masks exported by the pruners(L1FilterPruner, etc), and check if there is mask conflict, if so, MaskConflict sets the conflicting masks to the same value. ``` -from analysis_utils.topology.torch.mask_conflict import MaskConflict +from nni.compression.torch.utils.mask_conflict import MaskConflict mc = MaskConflict('./resnet18_mask', net, data) mc.fix_mask_conflict() mc.export('./resnet18_fixed_mask') From b7671da5451875f84ad776b2aaa1595c6790c603 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 15 Jun 2020 06:28:35 +0000 Subject: [PATCH 36/47] Update according the review comments. Signed-off-by: Ningxin --- docs/en_US/Compressor/CompressionUtils.md | 6 +- .../compression/torch/utils/mask_conflict.py | 10 +-- .../torch/utils/sensitivity_analysis.py | 88 +++++++++++++------ .../torch/utils/shape_dependency.py | 73 ++++++--------- 4 files changed, 90 insertions(+), 87 deletions(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index 044871f41c..408ad2c6c6 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -29,7 +29,7 @@ os.makedir(outdir) s_analyzer.export(os.path.join(outdir, filename)) ``` -Two key parameters of SensitivityAnalysis are model, and val_func. 'model' is the neural network that to be analyzed and the 'val_func' is the validation function that returns the model accuracy on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy on the dataset and pass it to SensitivityAnalysis. +Two key parameters of SensitivityAnalysis are model, and val_func. 'model' is the neural network that to be analyzed and the 'val_func' is the validation function that returns the model accuracy/loss/ or other metrics on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. SensitivityAnalysis can export the sensitivity results as a csv file usage is shown in the example above. Futhermore, users can specify the sparsities values used to prune for each layer by optinal parameter 'sparsities'. @@ -38,7 +38,7 @@ s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, ``` the SensitivityAnalysis will prune 25% 50% 75% weights gradually for each layer, and record the model's accuracy at the same time (SensitivityAnalysis only prune a layer once a time, the other layers are set to their original weights). If the sparsities is not set, SensitivityAnalysis will use the numpy.arange(0.1, 1.0, 0.1) as the default sparsity values. -Users can also speed up the progress of sensitivity analysis by the early_stop option. By default, the SensitivityAnalysis will test the accuracy under all sparsities for each layer. In contrast, when the early_stop is set, the sensitivity analysis for a layer will stop, when the accuracy drop already reaches the threshold set by early_stop. +Users can also speed up the progress of sensitivity analysis by the early_stop/min_threshold/max_threshold option. By default, the SensitivityAnalysis will test the accuracy under all sparsities for each layer. In contrast, when the early_stop is set, the sensitivity analysis for a layer will stop, when the accuracy/loss has already droped/raised the value of early_stop. If the min_threshold/max_threshold is set, when the validation metric returned by the val_func is lower/larger than the threshold, the sensitivity analysis will stop. ```python s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop=0.1) ``` @@ -59,7 +59,7 @@ features.10,0.55468,0.5394,0.49576,0.4291,0.3591,0.28138,0.14256,0.05446,0.01578 We also provide several tools for the topology analysis during the model compression. ### ChannelDependency -Complicated models may has residual connection/concat operations in their models. When the user prune these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. If the layers has channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that run on the devices, because there will be a shape conflict when the model try to add/concat the outputs of these layers. This model is to find the layers that has channel count dependencies to help user better prune their model. +Complicated models may has residual connection/concat operations in their models. When the user prune these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. If the layers has channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that run on the devices, because there will be a shape conflict when the model try to add/concat the outputs of these layers. This tool is to find the layers that has channel count dependencies to help user better prune their model. #### Usage ```python diff --git a/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py b/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py index 98b798e761..70ce9aa49c 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py +++ b/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py @@ -60,14 +60,8 @@ def fix_mask_conflict(self): w_mask = self.masks[name]['weight'] shape = w_mask.size() count = np.prod(shape[1:]) - all_ones = [] - all_zeros = [] - for i in range(w_mask.size(0)): - _count = torch.sum(w_mask[i]) - if _count == count: - all_ones.append(i) - elif _count == 0: - all_zeros.append(i) + all_ones = (w_mask.flatten(1).sum(-1)==count).nonzero().squeeze(1).tolist() + all_zeros = (w_mask.flatten(1).sum(-1)==0).nonzero().squeeze(1).tolist() if len(all_ones) + len(all_zeros) < w_mask.size(0): # In fine-grained pruning, there is no need to check # the shape conflict diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index 5fa836ba6b..a34acb0de7 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -21,7 +21,7 @@ class SensitivityAnalysis: - def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop=1.0): + def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop=None, min_threshold=None, max_threshold=None): """ Perform sensitivity analysis for this model. Parameters @@ -37,23 +37,28 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop There are no restrictions on the input parameters of the val_function. User can use the val_args, val_kwargs parameters in analysis to pass all the parameters that val_func needed. - sparsities : list The sparsity list provided by users. This parameter is set when the user only wants to test some specific sparsities. In the sparsity list, each element is a sparsity value which means how much weight the pruner should prune. Take [0.25, 0.5, 0.75] for an example, the SensitivityAnalysis will prune 25% 50% 75% weights gradually for each layer. - prune_type : str The pruner type used to prune the conv layers, default is 'l1', and 'l2', 'fine-grained' is also supported. early_stop : float If this flag is set, the sensitivity analysis - for a conv layer will early stop when the accuracy - drop already reach the value of early_stop (0.05 for example). - The default value is 1.0, which means the analysis won't stop + for a conv layer will early stop when the validation metric( + for example, accurracy/loss) has alreay droped/raised the value + of early_stop (0.05 for example). + The default value is None, which means the analysis won't stop until all given sparsities are tested. + min_threshold : float + If the validation metric returned by the val_func is lower + than min_threshold, the sensitivity analysis will stop. + max_threshold : float + if the validation metric returned by the val_func is larger + than max_threshold, the sensitivity analysis will stop. """ self.model = model @@ -73,7 +78,9 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop elif prune_type == 'fine-grained': self.Pruner = LevelPruner self.early_stop = early_stop - self.ori_acc = None # original accuracy for the model + self.min_threshold = min_threshold + self.max_threshold = max_threshold + self.ori_metric = None # original validation metric for the model # already_pruned is for the iterative sensitivity analysis # For example, sensitivity_pruner iteratively prune the target # model according to the sensitivity. After each round of @@ -93,7 +100,34 @@ def model_parse(self): self.target_layer[name] = submodel self.already_pruned[name] = 0 - def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): + def _need_to_stop(self, ori_metric, cur_metric): + """ + Judge if meet the stop conditon(early_stop, min_threshold, + max_threshold). + Parameters + ---------- + ori_metric : float + original validation metric + cur_metric : float + current validation metric + + Returns + ------- + stop : bool + if stop the sensitivity analysis + """ + if self.early_stop is not None: + if abs(ori_metric - cur_metric) >= self.early_stop: + return True + if self.min_threshold is not None: + if cur_metric < self.min_threshold: + return True + if self.max_threshold is not None: + if cur_metric > self.max_threshold: + return True + return False + + def analysis(self, val_args=None, val_kwargs=None, specified_layers=None): """ This function analyze the sensitivity to pruning for each conv layer in the target model. @@ -108,31 +142,30 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): args for the val_function val_kwargs : dict kwargs for the val_funtion - start : int - Layer index of the sensitivity analysis start. - end : int - Layer index of the sensitivity analysis end. - + specified_layers : list + list of layer names to analyze sensitivity. + If this variable is set, then only analyze + the conv layers in specified in the list. + User can also use this option to parallelize + the sensitivity analysis easily. Returns ------- sensitivities : dict dict object that stores the trajectory of the accuracy when the prune ratio changes """ - if not end: - end = self.layers_count - assert start >= 0 and end <= self.layers_count - assert start <= end if val_args is None: val_args = [] if val_kwargs is None: val_kwargs = {} - # Get the validation accuracy before pruning - if self.ori_acc is None: - self.ori_acc = self.val_func(*val_args, **val_kwargs) + # Get the original validation metric(accuracy) before pruning + if self.ori_metric is None: + self.ori_metric = self.val_func(*val_args, **val_kwargs) namelist = list(self.target_layer.keys()) - for layerid in range(start, end): - name = namelist[layerid] + if specified_layers is not None: + # only analyze several specified conv layers + namelist = list(filter(lambda x: x in specified_layers, namelist)) + for name in namelist: self.sensitivities[name] = {} for sparsity in self.sparsities: # Calculate the actual prune ratio based on the already pruned ratio @@ -145,15 +178,15 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): name], 'op_types': ['Conv2d']}] pruner = self.Pruner(self.model, cfg) pruner.compress() - val_acc = self.val_func(*val_args, **val_kwargs) + val_metric = self.val_func(*val_args, **val_kwargs) logger.info('Layer: %s Sparsity: %.2f Accuracy: %.4f', - name, sparsity, val_acc) + name, sparsity, val_metric) - self.sensitivities[name][sparsity] = val_acc + self.sensitivities[name][sparsity] = val_metric pruner._unwrap_model() del pruner - # if the accuracy drop already reach the 'early_stop' - if val_acc + self.early_stop < self.ori_acc: + # check if the current metric meet the stop condition + if self._need_to_stop(self.ori_metric, val_metric): break # reset the weights pruned by the pruner, because the @@ -164,7 +197,6 @@ def analysis(self, val_args=None, val_kwargs=None, start=0, end=None): return self.sensitivities - def export(self, filepath): """ Export the results of the sensitivity analysis diff --git a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py index b3c348ea91..ca888337a8 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py +++ b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py @@ -37,7 +37,7 @@ def __init__(self, model=None, dummy_input=None, traced_model=None): self.dependency = dict() self.build_channel_dependency() - def get_parent_convs(self, node): + def _get_parent_layers(self, node): """ Find the nearest father conv layers for the target node. @@ -48,23 +48,23 @@ def get_parent_convs(self, node): Returns ------- - parent_convs: list - nearest father conv layers for the target worknode. + parent_layers: list + nearest father conv/linear layers for the target worknode. """ - parent_convs = [] + parent_layers = [] queue = [] queue.append(node) while queue: curnode = queue.pop(0) - if curnode.op_type == 'Conv2d': + if curnode.op_type == 'Conv2d' or curnode.op_type == 'Linear': # find the first met conv - parent_convs.append(curnode.name) + parent_layers.append(curnode.name) continue parents = self.graph.find_predecessors(curnode.unique_name) parents = [self.graph.name_to_node[name] for name in parents] for parent in parents: queue.append(parent) - return parent_convs + return parent_layers def build_channel_dependency(self): """ @@ -72,11 +72,11 @@ def build_channel_dependency(self): in the model. """ for node in self.graph.nodes_py.nodes_op: - parent_convs = [] + parent_layers = [] # find the node that contains aten::add # or aten::cat operations if node.op_type in ADD_TYPES: - parent_convs = self.get_parent_convs(node) + parent_layers = self._get_parent_layers(node) elif node.op_type == CAT_TYPE: # To determine if this cat operation will introduce channel # dependency, we need the specific input parameters of the cat @@ -91,10 +91,10 @@ def build_channel_dependency(self): cat_dim = list(cnode.inputs())[1].toIValue() break if cat_dim != 1: - parent_convs = self.get_parent_convs(node) - dependency_set = set(parent_convs) + parent_layers = self._get_parent_layers(node) + dependency_set = set(parent_layers) # merge the dependencies - for parent in parent_convs: + for parent in parent_layers: if parent in self.dependency: dependency_set.update(self.dependency[parent]) # save the dependencies @@ -102,45 +102,22 @@ def build_channel_dependency(self): self.dependency[_node] = dependency_set - def filter_prune_check(self, ratios): - """ - According to the channel dependencies between the conv - layers, check if the filter pruning ratio for the conv - layers is legal. - - Parameters - --------- - ratios : dict - the prune ratios for the layers. %ratios is a dict, - in which the keys are the names of the target layer - and the values are the prune ratio for the corresponding - layers. For example: - ratios = {'body.conv1': 0.5, 'body.conv2':0.5} - Note: the name of the layers should looks like - the names that model.named_modules() functions - returns. - - Returns - ------- - True/False - """ - for node in self.graph.nodes_py.nodes_op: - if node.op_type == 'Conv2d' and node.name in ratios: - if node.name not in self.dependency: - # this layer has no dependency on other layers - # it's legal to set any prune ratio between 0 and 1 - continue - for other in self.dependency[node.name]: - if other not in ratios: - return False - elif ratios[other] != ratios[node.name]: - return False - return True - - def export(self, filepath): """ export the channel dependencies as a csv file. + The layers at the same line have output channel + dependencies with each other. For example, + layer1.1.conv2, conv1, and layer1.0.conv2 have + output channel dependencies with each other, which + means the output channel(filters) numbers of these + three layers should be same with each other, otherwise + the model may has shape conflict. + + Output example: + Dependency Set,Convolutional Layers + Set 1,layer1.1.conv2,layer1.0.conv2,conv1 + Set 2,layer1.0.conv1 + Set 3,layer1.1.conv1 """ header = ['Dependency Set', 'Convolutional Layers'] setid = 0 From 1b9705b8347995c2faa5de24424cd06df990433e Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 15 Jun 2020 06:32:10 +0000 Subject: [PATCH 37/47] Rename the unit test. Signed-off-by: Ningxin --- .../tests/{test_analysis_utils.py => test_compression_utils.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/sdk/pynni/tests/{test_analysis_utils.py => test_compression_utils.py} (100%) diff --git a/src/sdk/pynni/tests/test_analysis_utils.py b/src/sdk/pynni/tests/test_compression_utils.py similarity index 100% rename from src/sdk/pynni/tests/test_analysis_utils.py rename to src/sdk/pynni/tests/test_compression_utils.py From 9d0519eca7a64d05d989a60713ccf2ae757618b5 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 15 Jun 2020 06:59:31 +0000 Subject: [PATCH 38/47] update docs --- docs/en_US/Compressor/CompressionUtils.md | 6 ++++++ .../nni/compression/torch/utils/sensitivity_analysis.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index 408ad2c6c6..9fc861e55a 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -42,6 +42,12 @@ Users can also speed up the progress of sensitivity analysis by the early_stop/m ```python s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop=0.1) ``` +If users only want to analyze several specified convolutional layers, users can specify the target conv layers by the 'sepcified_layers' parameter in analysis function. For example +```python +sensitivity = s_analyzer.analysis(val_args=[net], specified_layers=['Conv1']) +``` +In this example, only the Conv1 layer is analyzed. + ### Output example The following lines are the example csv file exported from SensitivityAnalysis. The first line is constructed by 'layername' and sparsity list. Here the sparsity value means how much weight SensitivityAnalysis prune for each layer. Each line below records the model accuracy when this layer is under different sparsities. Note that, due to the early_stop option, some layers may diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index a34acb0de7..d7ac827813 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -158,7 +158,7 @@ def analysis(self, val_args=None, val_kwargs=None, specified_layers=None): val_args = [] if val_kwargs is None: val_kwargs = {} - # Get the original validation metric(accuracy) before pruning + # Get the original validation metric(accuracy/loss) before pruning if self.ori_metric is None: self.ori_metric = self.val_func(*val_args, **val_kwargs) namelist = list(self.target_layer.keys()) From f56380269b96d855c58113a36e060fb7fac492a7 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Mon, 15 Jun 2020 07:06:23 +0000 Subject: [PATCH 39/47] fix pylint errors --- src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py | 4 ++-- src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py b/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py index 70ce9aa49c..626283d43d 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py +++ b/src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py @@ -60,8 +60,8 @@ def fix_mask_conflict(self): w_mask = self.masks[name]['weight'] shape = w_mask.size() count = np.prod(shape[1:]) - all_ones = (w_mask.flatten(1).sum(-1)==count).nonzero().squeeze(1).tolist() - all_zeros = (w_mask.flatten(1).sum(-1)==0).nonzero().squeeze(1).tolist() + all_ones = (w_mask.flatten(1).sum(-1) == count).nonzero().squeeze(1).tolist() + all_zeros = (w_mask.flatten(1).sum(-1) == 0).nonzero().squeeze(1).tolist() if len(all_ones) + len(all_zeros) < w_mask.size(0): # In fine-grained pruning, there is no need to check # the shape conflict diff --git a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py index ca888337a8..8922ec483e 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py +++ b/src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py @@ -111,7 +111,7 @@ def export(self, filepath): output channel dependencies with each other, which means the output channel(filters) numbers of these three layers should be same with each other, otherwise - the model may has shape conflict. + the model may has shape conflict. Output example: Dependency Set,Convolutional Layers From a24acd0f479c815fcd27ecdfe78c9bf99c08ec60 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 01:35:13 +0000 Subject: [PATCH 40/47] Update. Signed-off-by: Ningxin --- docs/en_US/Compressor/CompressionUtils.md | 11 +++- .../torch/utils/sensitivity_analysis.py | 64 +++++++++++-------- 2 files changed, 45 insertions(+), 30 deletions(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index 9fc861e55a..8de897a1db 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -29,7 +29,7 @@ os.makedir(outdir) s_analyzer.export(os.path.join(outdir, filename)) ``` -Two key parameters of SensitivityAnalysis are model, and val_func. 'model' is the neural network that to be analyzed and the 'val_func' is the validation function that returns the model accuracy/loss/ or other metrics on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. +Two key parameters of SensitivityAnalysis are model, and val_func. model is the neural network that to be analyzed and the val_func is the validation function that returns the model accuracy/loss/ or other metrics on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. SensitivityAnalysis can export the sensitivity results as a csv file usage is shown in the example above. Futhermore, users can specify the sparsities values used to prune for each layer by optinal parameter 'sparsities'. @@ -38,9 +38,14 @@ s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, ``` the SensitivityAnalysis will prune 25% 50% 75% weights gradually for each layer, and record the model's accuracy at the same time (SensitivityAnalysis only prune a layer once a time, the other layers are set to their original weights). If the sparsities is not set, SensitivityAnalysis will use the numpy.arange(0.1, 1.0, 0.1) as the default sparsity values. -Users can also speed up the progress of sensitivity analysis by the early_stop/min_threshold/max_threshold option. By default, the SensitivityAnalysis will test the accuracy under all sparsities for each layer. In contrast, when the early_stop is set, the sensitivity analysis for a layer will stop, when the accuracy/loss has already droped/raised the value of early_stop. If the min_threshold/max_threshold is set, when the validation metric returned by the val_func is lower/larger than the threshold, the sensitivity analysis will stop. +Users can also speed up the progress of sensitivity analysis by the early_stop_mode and early_stop_value option. By default, the SensitivityAnalysis will test the accuracy under all sparsities for each layer. In contrast, when the early_stop_mode and early_stop_value are set, the sensitivity analysis for a layer will stop, when the accuracy/loss has already meet the threshold set by early_stop_value. We support four early stop modes: minimize, maximize, dropped, raised. +minimize: The analysis stops when the validation metric return by the val_func lower than early_stop_value. +maximize: The analysis stops when the validation metric return by the val_func larger than early_stop_value. +dropped: The analysis stops when the validation metric has dropped by early_stop_value. +raised: The analysis stops when the validation metric has raised by early_stop_value. + ```python -s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop=0.1) +s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop_mode='dropped', early_stop_value=0.1) ``` If users only want to analyze several specified convolutional layers, users can specify the target conv layers by the 'sepcified_layers' parameter in analysis function. For example ```python diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index d7ac827813..49676991d6 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -21,7 +21,7 @@ class SensitivityAnalysis: - def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop=None, min_threshold=None, max_threshold=None): + def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop_mode=None, early_stop_value=None): """ Perform sensitivity analysis for this model. Parameters @@ -46,19 +46,24 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop prune_type : str The pruner type used to prune the conv layers, default is 'l1', and 'l2', 'fine-grained' is also supported. - early_stop : float + early_stop_mode : str If this flag is set, the sensitivity analysis for a conv layer will early stop when the validation metric( - for example, accurracy/loss) has alreay droped/raised the value - of early_stop (0.05 for example). - The default value is None, which means the analysis won't stop - until all given sparsities are tested. - min_threshold : float - If the validation metric returned by the val_func is lower - than min_threshold, the sensitivity analysis will stop. - max_threshold : float - if the validation metric returned by the val_func is larger - than max_threshold, the sensitivity analysis will stop. + for example, accurracy/loss) has alreay meet the threshold. We + support four different early stop modes: minimize, maximize, dropped, + raised. The default value is None, which means the analysis won't stop + until all given sparsities are tested. This option should be used with + early_stop_value together. + + minimize: The analysis stops when the validation metric return by the val_func + lower than early_stop_value. + maximize: The analysis stops when the validation metric return by the val_func + larger than early_stop_value. + dropped: The analysis stops when the validation metric has dropped by early_stop_value. + raised: The analysis stops when the validation metric has raised by early_stop_value. + early_stop_value : float + This value is used as the threshold for different earlystop modes. + This value is effective only when the early_stop_mode is set. """ self.model = model @@ -77,9 +82,8 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop self.Pruner = L2FilterPruner elif prune_type == 'fine-grained': self.Pruner = LevelPruner - self.early_stop = early_stop - self.min_threshold = min_threshold - self.max_threshold = max_threshold + self.early_stop_mode = early_stop_mode + self.early_stop_value = early_stop_value self.ori_metric = None # original validation metric for the model # already_pruned is for the iterative sensitivity analysis # For example, sensitivity_pruner iteratively prune the target @@ -116,14 +120,21 @@ def _need_to_stop(self, ori_metric, cur_metric): stop : bool if stop the sensitivity analysis """ - if self.early_stop is not None: - if abs(ori_metric - cur_metric) >= self.early_stop: + if self.early_stop_mode is None: + # early stop mode is not enable + return False + assert self.early_stop_value is not None + if self.early_stop_mode == 'minimize': + if cur_metric < self.early_stop_value: return True - if self.min_threshold is not None: - if cur_metric < self.min_threshold: + elif self.early_stop_mode == 'maximize': + if cur_metric > self.early_stop_value: return True - if self.max_threshold is not None: - if cur_metric > self.max_threshold: + elif self.early_stop_mode == 'dropped': + if cur_metric < ori_metric - self.early_stop_value: + return True + elif self.early_stop_mode == 'raised': + if cur_metric > ori_metric + self.early_stop_value: return True return False @@ -152,7 +163,7 @@ def analysis(self, val_args=None, val_kwargs=None, specified_layers=None): ------- sensitivities : dict dict object that stores the trajectory of the - accuracy when the prune ratio changes + accuracy/loss when the prune ratio changes """ if val_args is None: val_args = [] @@ -179,7 +190,7 @@ def analysis(self, val_args=None, val_kwargs=None, specified_layers=None): pruner = self.Pruner(self.model, cfg) pruner.compress() val_metric = self.val_func(*val_args, **val_kwargs) - logger.info('Layer: %s Sparsity: %.2f Accuracy: %.4f', + logger.info('Layer: %s Sparsity: %.2f Validation Metric: %.4f', name, sparsity, val_metric) self.sensitivities[name][sparsity] = val_metric @@ -202,10 +213,9 @@ def export(self, filepath): Export the results of the sensitivity analysis to a csv file. The firstline of the csv file describe the content structure. The first line is constructed by 'layername' and sparsity - list. Each line below records the model accuracy when this layer is - under different sparsities. Note that, due to the early_stop option, - some layers may not have model accuracies under all sparsities, because - its accuracy drop has alreay exceeded the threshold set by the user. + list. Each line below records the validation metric returned by val_func + when this layer is under different sparsities. Note that, due to the early_stop + option, some layers may not have the metrics under all sparsities. layername, 0.25, 0.5, 0.75 conv1, 0.6, 0.55 From 91d5f4963912c7d8732b89c8a88062dbdedb67fc Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 01:43:00 +0000 Subject: [PATCH 41/47] update --- .../pynni/nni/compression/torch/utils/sensitivity_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index 49676991d6..525087b223 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -33,7 +33,7 @@ def __init__(self, model, val_func, sparsities=None, prune_type='l1', early_stop different models may need different dataset/criterion , therefore the user need to cover this part by themselves. In the val_func, the model should be tested on the validation dateset, - and the validation accuracy should be returned as the output of val_func. + and the validation accuracy/loss should be returned as the output of val_func. There are no restrictions on the input parameters of the val_function. User can use the val_args, val_kwargs parameters in analysis to pass all the parameters that val_func needed. From 33178a2407eeee5e0c1a8b4e5507826ce7c1fe68 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 02:57:10 +0000 Subject: [PATCH 42/47] fix grammar --- docs/en_US/Compressor/CompressionUtils.md | 28 +++++++++++++---------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index 8de897a1db..bbe279ebcd 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -6,7 +6,7 @@ First, we provide a sensitivity analysis tool (**SensitivityAnalysis**) for user ### Usage -Following codes show the basic usage of the SensitivityAnalysis. +The following codes show the basic usage of the SensitivityAnalysis. ```python from nni.compression.torch.utils.sensitivity_analysis import SensitivityAnalysis @@ -29,20 +29,24 @@ os.makedir(outdir) s_analyzer.export(os.path.join(outdir, filename)) ``` -Two key parameters of SensitivityAnalysis are model, and val_func. model is the neural network that to be analyzed and the val_func is the validation function that returns the model accuracy/loss/ or other metrics on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. +Two key parameters of SensitivityAnalysis are model, and val_func. `model` is the neural network that to be analyzed and the `val_func` is the validation function that returns the model accuracy/loss/ or other metrics on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. SensitivityAnalysis can export the sensitivity results as a csv file usage is shown in the example above. -Futhermore, users can specify the sparsities values used to prune for each layer by optinal parameter 'sparsities'. +Futhermore, users can specify the sparsities values used to prune for each layer by optional parameter `sparsities`. ```python s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75]) ``` the SensitivityAnalysis will prune 25% 50% 75% weights gradually for each layer, and record the model's accuracy at the same time (SensitivityAnalysis only prune a layer once a time, the other layers are set to their original weights). If the sparsities is not set, SensitivityAnalysis will use the numpy.arange(0.1, 1.0, 0.1) as the default sparsity values. -Users can also speed up the progress of sensitivity analysis by the early_stop_mode and early_stop_value option. By default, the SensitivityAnalysis will test the accuracy under all sparsities for each layer. In contrast, when the early_stop_mode and early_stop_value are set, the sensitivity analysis for a layer will stop, when the accuracy/loss has already meet the threshold set by early_stop_value. We support four early stop modes: minimize, maximize, dropped, raised. -minimize: The analysis stops when the validation metric return by the val_func lower than early_stop_value. -maximize: The analysis stops when the validation metric return by the val_func larger than early_stop_value. -dropped: The analysis stops when the validation metric has dropped by early_stop_value. -raised: The analysis stops when the validation metric has raised by early_stop_value. +Users can also speed up the progress of sensitivity analysis by the early_stop_mode and early_stop_value option. By default, the SensitivityAnalysis will test the accuracy under all sparsities for each layer. In contrast, when the early_stop_mode and early_stop_value are set, the sensitivity analysis for a layer will stop, when the accuracy/loss has already met the threshold set by early_stop_value. We support four early stop modes: minimize, maximize, dropped, raised. + +minimize: The analysis stops when the validation metric return by the val_func lower than `early_stop_value`. + +maximize: The analysis stops when the validation metric return by the val_func larger than `early_stop_value`. + +dropped: The analysis stops when the validation metric has dropped by `early_stop_value`. + +raised: The analysis stops when the validation metric has raised by `early_stop_value`. ```python s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop_mode='dropped', early_stop_value=0.1) @@ -56,7 +60,7 @@ In this example, only the Conv1 layer is analyzed. ### Output example The following lines are the example csv file exported from SensitivityAnalysis. The first line is constructed by 'layername' and sparsity list. Here the sparsity value means how much weight SensitivityAnalysis prune for each layer. Each line below records the model accuracy when this layer is under different sparsities. Note that, due to the early_stop option, some layers may -not have model accuracies under all sparsities, because its accuracy drop has alreay exceeded the threshold set by the user. +not have model accuracies under all sparsities, because its accuracy drop has already exceeded the threshold set by the user. ``` layername,0.05,0.1,0.2,0.3,0.4,0.5,0.7,0.85,0.95 features.0,0.54566,0.46308,0.06978,0.0374,0.03024,0.01512,0.00866,0.00492,0.00184 @@ -70,7 +74,7 @@ features.10,0.55468,0.5394,0.49576,0.4291,0.3591,0.28138,0.14256,0.05446,0.01578 We also provide several tools for the topology analysis during the model compression. ### ChannelDependency -Complicated models may has residual connection/concat operations in their models. When the user prune these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. If the layers has channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that run on the devices, because there will be a shape conflict when the model try to add/concat the outputs of these layers. This tool is to find the layers that has channel count dependencies to help user better prune their model. +Complicated models may have residual connection/concat operations in their models. When the user prunes these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. If the layers have channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that runs on the devices, because there will be a shape conflict when the model tries to add/concat the outputs of these layers. This tool is to find the layers that have channel count dependencies to help users better prune their model. #### Usage ```python @@ -81,7 +85,7 @@ channel_depen.export('dependency.csv') ``` #### Output Example -Following lines are the output example of torchvision.models.resnet18 exported by ChannelDependency. The layers at the same line have output channel dependencies with each other. For example, layer1.1.conv2, conv1, and layer1.0.conv2 have output channel dependencies with each other, which means the output channel(filters) numbers of these three layers should be same with each other, otherwise the model may has shape conflict. +The following lines are the output example of torchvision.models.resnet18 exported by ChannelDependency. The layers at the same line have output channel dependencies with each other. For example, layer1.1.conv2, conv1, and layer1.0.conv2 have output channel dependencies with each other, which means the output channel(filters) numbers of these three layers should be same with each other, otherwise, the model may have shape conflict. ``` Dependency Set,Convolutional Layers Set 1,layer1.1.conv2,layer1.0.conv2,conv1 @@ -99,7 +103,7 @@ Set 12,layer4.1.conv1 ``` ### MaskConflict -When the masks of different layers in a model has conflict, we can fix the mask conflict by MaskConflict. Specifically, the MaskConflict loads the masks exported by the pruners(L1FilterPruner, etc), and check if there is mask conflict, if so, MaskConflict sets the conflicting masks to the same value. +When the masks of different layers in a model have conflict, we can fix the mask conflict by MaskConflict. Specifically, the MaskConflict loads the masks exported by the pruners(L1FilterPruner, etc), and check if there is mask conflict, if so, MaskConflict sets the conflicting masks to the same value. ``` from nni.compression.torch.utils.mask_conflict import MaskConflict From 7cab8084942731777da911914d3a824a9fadd08a Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 03:33:54 +0000 Subject: [PATCH 43/47] update doc --- docs/en_US/Compressor/CompressionUtils.md | 17 +++++++++++------ .../torch/utils/sensitivity_analysis.py | 2 +- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index bbe279ebcd..50681d38d8 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -1,7 +1,12 @@ # Analysis Utils for Model Compression + +```eval_rst +.. contents:: +``` + We provide several easy-to-use tools for users to analyze their model during model compression. -## Sensitivity +## Sensitivity Analysis First, we provide a sensitivity analysis tool (**SensitivityAnalysis**) for users to analyze the sensitivity of each convolutional layer in their model. Specifically, the SensitiviyAnalysis gradually prune each layer of the model, and test the accuracy of the model at the same time. Note that, SensitivityAnalysis only prunes a layer once a time, and the other layers are set to their original weights. According to the accuracies of different convolutional layers under different sparsities, we can easily find out which layers the model accuracy is more sensitive to. ### Usage @@ -29,7 +34,7 @@ os.makedir(outdir) s_analyzer.export(os.path.join(outdir, filename)) ``` -Two key parameters of SensitivityAnalysis are model, and val_func. `model` is the neural network that to be analyzed and the `val_func` is the validation function that returns the model accuracy/loss/ or other metrics on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. +Two key parameters of SensitivityAnalysis are `model`, and `val_func`. `model` is the neural network that to be analyzed and the `val_func` is the validation function that returns the model accuracy/loss/ or other metrics on the validation dataset. Due to different scenarios may have different ways to calculate the loss/accuracy, so users should prepare a function that returns the model accuracy/loss on the dataset and pass it to SensitivityAnalysis. SensitivityAnalysis can export the sensitivity results as a csv file usage is shown in the example above. Futhermore, users can specify the sparsities values used to prune for each layer by optional parameter `sparsities`. @@ -51,16 +56,16 @@ raised: The analysis stops when the validation metric has raised by `early_stop_ ```python s_analyzer = SensitivityAnalysis(model=net, val_func=val, sparsities=[0.25, 0.5, 0.75], early_stop_mode='dropped', early_stop_value=0.1) ``` -If users only want to analyze several specified convolutional layers, users can specify the target conv layers by the 'sepcified_layers' parameter in analysis function. For example +If users only want to analyze several specified convolutional layers, users can specify the target conv layers by the `specified_layers` in analysis function. `specified_layers` is a list that consists of the Pytorch module names of the conv layers. For example ```python sensitivity = s_analyzer.analysis(val_args=[net], specified_layers=['Conv1']) ``` -In this example, only the Conv1 layer is analyzed. +In this example, only the `Conv1` layer is analyzed. In addtion, users can quickly and easily achieve the analysis parallelization by launching multiple processes and assigning different conv layers of the same model to each process. ### Output example The following lines are the example csv file exported from SensitivityAnalysis. The first line is constructed by 'layername' and sparsity list. Here the sparsity value means how much weight SensitivityAnalysis prune for each layer. Each line below records the model accuracy when this layer is under different sparsities. Note that, due to the early_stop option, some layers may -not have model accuracies under all sparsities, because its accuracy drop has already exceeded the threshold set by the user. +not have model accuracies/losses under all sparsities, for example, its accuracy drop has already exceeded the threshold set by the user. ``` layername,0.05,0.1,0.2,0.3,0.4,0.5,0.7,0.85,0.95 features.0,0.54566,0.46308,0.06978,0.0374,0.03024,0.01512,0.00866,0.00492,0.00184 @@ -70,7 +75,7 @@ features.8,0.55696,0.54194,0.48892,0.42986,0.33048,0.2266,0.09566,0.02348,0.0056 features.10,0.55468,0.5394,0.49576,0.4291,0.3591,0.28138,0.14256,0.05446,0.01578 ``` -## Topology +## Topology Analysis We also provide several tools for the topology analysis during the model compression. ### ChannelDependency diff --git a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py index 525087b223..fc259833b6 100644 --- a/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py +++ b/src/sdk/pynni/nni/compression/torch/utils/sensitivity_analysis.py @@ -156,7 +156,7 @@ def analysis(self, val_args=None, val_kwargs=None, specified_layers=None): specified_layers : list list of layer names to analyze sensitivity. If this variable is set, then only analyze - the conv layers in specified in the list. + the conv layers that specified in the list. User can also use this option to parallelize the sensitivity analysis easily. Returns From e8d4c318a5ba3a71d72e03eaf80390d843177ee9 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 04:13:24 +0000 Subject: [PATCH 44/47] Update the docs. Signed-off-by: Ningxin --- docs/en_US/Compressor/CompressionUtils.md | 9 +++++++-- docs/img/channel_dependency_example.jpg | Bin 0 -> 43051 bytes 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 docs/img/channel_dependency_example.jpg diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index 50681d38d8..0233d74ab6 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -76,10 +76,15 @@ features.10,0.55468,0.5394,0.49576,0.4291,0.3591,0.28138,0.14256,0.05446,0.01578 ``` ## Topology Analysis -We also provide several tools for the topology analysis during the model compression. +We also provide several tools for the topology analysis during the model compression. These tools are to help users compress their model better. Because of the complex topology of the network, when compressing the model, users often need to spend a lot of effort to check whether the compression configuration is reasonable. So we provide these tools for topology analysis to reduce the burden on users. ### ChannelDependency -Complicated models may have residual connection/concat operations in their models. When the user prunes these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. If the layers have channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that runs on the devices, because there will be a shape conflict when the model tries to add/concat the outputs of these layers. This tool is to find the layers that have channel count dependencies to help users better prune their model. +Complicated models may have residual connection/concat operations in their models. When the user prunes these models, they need to be careful about the channel-count dependencies between the convolution layers in the model. Taking the following residual block in the resnet18 as an example. The output features of the `layer2.0.conv2` and `layer2.0.downsample.0` are added together, so the number of the output channels of `layer2.0.conv2` and `layer2.0.downsample.0` should be the same, or there may be a tensor shape conflict. + +![](../../img/channel_dependency_example.jpg) + + +If the layers have channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that runs on the devices, because there will be a shape conflict when the model tries to add/concat the outputs of these layers. This tool is to find the layers that have channel count dependencies to help users better prune their model. #### Usage ```python diff --git a/docs/img/channel_dependency_example.jpg b/docs/img/channel_dependency_example.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fb517fe00e42e3a5a160d7a1c3fa6cdab0cca50 GIT binary patch literal 43051 zcmb@t2T+q;)GivDbV5gIQK?FkE;XRiL_Uhr1yn?ufHVmZ2!iw`C?Fsm1f)p|NDCbm zklsQ~5R?)~q`i_5PQG)_oO{omJ9qy7KX<=bGw;mX_L`l&_I{rAtn>NvH4ytFZuWJf1jfKSQzLT z{=MTr*Uq~^91K)PRBvdgu7apJsAxE-&ig?SN?++H9{uao|7=v$G_-UVC|zP=rrglX zPU$`k4K<~+bhNaTyTd5gL9`rnoLA&^FL0SZroZaTt?(+Xh(Y95eGiYt1YT6}iC-im z6EEMzOZ;Nu5|Y=hD=Dj}-c(b+eMe8pwR(fBoLtCj2=#JUTuB{+|Bp7Zr%+f4KF(Jo|t6#X<3lnlc8o^#A%rMIA&j z8V*{zEAkgOb|D zXaCEw|I05lh=qoVGI%r`ASmdJRGc6Q`VSZlwBPQmAX-Ud`MyP4%#i6+&Cj;B*6d>g zA8rrZ<@mMh6`}>Y|BN^1I&rzpwnZoRTU)7)mkl%#RDS=8pH@)MU#p~rgaGFtGV>SW z%N0bA_BrSgm>q_cqjrF;n-XYH(altBAEp0E6*}r5h%It)=S~}Md>oOz7quz0hv)&H zgS>Oc07=tbM$D?9$(nr~war!|gw1`zv@-FaYQTlXtP}e=K`UX3(RndaH~NbRH`Or@ zx4918|F3r6p_k2#YI^RC{ud9zIViyiDIB|Y4&rx!-R?d0ZYGEf%~l8_*Z?n)OncC3 zV4b&10J7o!G~ZOe#uoRE4wQ|7d#)mp zR~v41U~l&nf_-O>7NOv+Mn@+H`}u{%+AVKcF`MGJkVbhB*xymF@*LE2o6Nkh^v%ba zK%+-c{`uS-2fxt(#B5xADQ{Ey1MF>Jw=uL^f~V;_2k|sPSb;{NR$Kn8itQPc8q&rF9w zq3zn)mCg^T!`d^rqIE>B)Yh4#t+Pbk=R$BI)CUp^y#)0>2hE>gReK$b*L-57aqz(6 z(ej?a(DmOA&t+rxWxQ=UCk{3qidsApBz+K^J_ptFz0#H^_d^<-`c|XWow`wM%eDaD zov3@Exu$XKQDHY7=J*60CtkR`|0d43)){7kK_rKR3FZ9`p}p$R_$ICU{VTi)myVKU zCkbM0latTVo+w^TcV2q@1~d=7QVJvz>Uz#W3=$K?1cUe{D0mcWa+eiilyFM8^46jw z|3Qr86)Ux1ej|=s6`HL_h2Wb_XJuVt3$g0uy9evD4+j5vixr~A7xdcy25%L3_?{t4 zCQBD=6#`cKwdp)@5)bB-G&Jlxo%G)`iXxG12X{D)1X#G=^xdF;bMLtZ|3RlS znehn!VQDlFv&#Anzi;tA({rC@TUGmF_9}5t+EMg<(l0KN$~}frP-vv~b|6d&t_5K5 z{)#Z4LA2a}m#i{`AVnx^3bCpH#uzK-Z(TWMeE$CJtZureCkBc`6alM)Rx4nFaBeMJ z19;`jWekZS8411^3*mM_|GrliY*MUeFkPuPZs-?EIj^xU}3_=q;W zzi*|gN1JnXXbrlEz|O@UnJ%ruurjIxXpz>NsHVv8Lh756WBQ)Kg~`hX*X1zKK}nkF zaBjFDey)3!&JGCbRaefBDn|1@MIud)(&dMTMLu*iYPjUvGHcXJkMzHFuvAfl_Y8SA zZG--TkB7TvH5Jc6bRa+kYa-XXB25qt7s5kRpTZ846^tWJz!x%Pn(IDv+Yc$7gIuzo z*p}+Oyyu#OKLBl=v*R-6?knirH7e5weA-Ntt zbhq;Ll4H%v^R9=!@wOSda?$1-WGwX#=)|Yq2G-XqNm^KAHJ`)W_{Xg<=Z^e|j;}7Z z&W}Z9(`ALY@!jk<{1~~Z@F^{XU+XRXln+Cg!zF=dbAxtrkz^@Pd50Z4lj%R=B^a5j zGFR_3nkt}sk8QfUP@Do6C#bxCSYj#pwFx^$?pEU*OT9KpTRJaIv^c=)S(k=ZR zE;)UvOA@AGp6raTEjcUla&f`tsc)Xz%8-$XJwNSvs)YX1hVLg*{mnn zcJTSstQu4xzsN;jnf@mWdThKS-#yip-x94E^TlSS;Z@fkrQic9-Az;@c#sl15Zs8T z1qt~QvmKVDqgWpnpkSblHzWAM>oFns)r19ckNE`Z!FaqTC+5 zBW`9`{_@jElQ1tIW`hs)&+`wd{$>+WoKn-!BtHUglqWXVC)px=tc%VqP=;A2U(!c; zQR}w-m+8xmw#9DCy~}q@P@|Xa-?Rrk1y>-RoP!ecrGPx*UGn(EcJjK&S|`+Lq73hJ z{3F))WAAf`2`|xt-H&e0?jl6DHrDikOLy~1-0I4{j&g&nMF>%AL(yb)82_)nF4>9Q zqfz1QJGtLl@3%3UZK@Tg8N#0FQ>EK{Z@a-#%8|2E`PQ&rd{Zz?lBQRi8W`?(Vhoov z!6OE9%EsbKz0@6Cdsal-MQ|Z2xh!dVzFseNsP?^t-DvKD4zT$WA|a!xA$2rB!KVQN z&GApbsmMLFtPL-eQie@_`Sd<=kU96|=cbl7=OE^XMxv9Sj;{9nxOt)TPXv=>@&B0} zpwKkOp;O^nzpbQ{H;W2^MBO&>1SPt}1q?lyRucGJra7N@LElRMY5Fyul1!DSfiB9w z-I4)?8jf1+Dl79eOB}2t_y|)b3mpReKsm!ZMYs%jv58Qo{c=;~1&|nHb?iFmI)oP;eV53FxsVfpHap2yEHsKs%9s0LQsPNW3{JzMOC zqMpumaRZTmOx43~_o%KexCct8g(hm%IQ#ulTh{7G-q#VH7qd{Gs_Jp-U2UT8t1Kap z5Ul&^;$QkASsX>C%JJmlzO$-!c_kL#t!Ggm2;-yvL{pL;-UBgUgr^^}W-8dJB}rAg z+s%9m{WR?B6|jD`_?&1)-?GHF@F(yq4&C*HkVWD$Fa{z~oC21FmP9lW3r~i`Q{@rm>P4e%8tsCRz2P2wYW9U+MNE(qMTNc{ievY z8NJ$?6CidHp-DwZ-x=bqAJVQ2t3xG9sPcq5qTZCUZkrraKFfdeUd{M$kN$-5_WFFMk6YA*-xiT&k{ zNP;DM%dx!gfc z#0@fic;8C|ZHL+#l(jBd(*X;9oH zs%zeFc}tvCteha8$c8>l`;`wqO-Z}CmT%kQ?%E3q&0UU2ZY~V(S-Id4GG<5c_e61b z=#``Lyeff7U>oJ~mKhm4ilqI_RrS zFGD=& zu30Bfyt2}_(6wN=oTkI~lhS#fNe>9b)AUKn6-i z`x;979n~JUb+jDr+l7`JsT7pSNLGVIO`L-;;4Fjtz_yR4#ZvfW0cWki8Tc3m#8u6& z08xFPxd4L-BOPIk9?X;^=aXzr9{{Cte*Ka!T)iO~#Ubbi{+a_GM5P$T_`C|ykULWZ zUeQ2EA=BX52(2IbE{K-|IY;6ZP5nn;XPl`sxGCWY;B1q=r%e_q6~yKukXioCJT2=flpWbSXLM zk5<5{Wn(Mt-JkzG5Sc1UvK2126hpt3 zzJ?G48}!OY`5N0qd7j6L)LIy8d&76|j207oj`xZ8 zNYrfrs?@!0C^nEej0P(a`zE<#R_vd6lCV?*zZd5T)1b7;720L4zvK=CVdj6~>NN%=fcZ*$V z$%uGD{lO{Hso|YA2L-~Z5V}e=0||Gr;NH7xl?;OiI9`o~t6d^4cpRmU5)9lu%Ee8t&3x!nvb1uogc z?*KjPINu;|FU3AiFvAvQL2h_HwwORTdD`iVo6{!nKNs>hU;3aB_&0@Ki3)?FMLYv9 z?Dz9O7na2PM28Ci^;TH7`~A9)=Dc%YE^Z%hhh59GN>?+V=y{V7_~?Z$&z`U7OTzV! zGnN9D))Amxx^M@|X1^e%i!Xe{|2LVjjgS=^XyT8B3}`)(`DgC#ZI7oWA*cJME!n>s z+PzN9mj{$PHQVWEX>p0+CbI*_Vxo;PfryN01MzJOhlhV;GeFeOo zHJ#Hfu|JUR9c7IU`no)QUwQ30qfK+DLE({^Nn8hVSWkK^cmTpex=h)X9=9ZP=oSI4 z#s^klq(=+Ew)^RGoXMXX0x4`4{9c4fVOL{U`4MhXW9Uud?U`o1`l-WI;o)K-CbD#G zImR;Tsa^FG8Ab76g%;Kqa+!2*N1}^wQCmsuej{3XYBQfj!l8sd$}~u%44puKrfOE( z9bw@%{5v|Yz}pC4){KROzI#NMrbOpd1s991JAZD(XQvs`uXND@(M51md?&I6ljmbd zU`q^b-#s~XSqgeNg?mziH2h}ON6iMN<(O@RYY`TE92E%1Wf%vML1DfbP1BNC)l_BH#D&7vMI4&Go>w6^swMHe)Hy=3pd@xE>RG%yRBRRWw+5KMpD81^qcS+k{%ZVu8vbRZ@%u- z*y;wd7%CV;)C@t7J{^vLsmu5HNY@&wA1v>lg|(N=Zx^zFlcoID5hYRj3} z8#YFn5H>4?`r17zP|qGNB#I$3p`xWOb)v~pO|);=PI^!c^gXEw!r-X>0*0gEF_ta$ zsCL_J3X5Q`oS!S5_H{zd(5uYz64Edw+EK#ndv{W#lq;D%H5I~C3m*j!I`RRPWkcx! z6r41l%w_d>Y+hU5r5V)v%t!vO+NZS!ox~PCh-M8p=l&oc~q<>oY>y zL)qQrZ|bf)h}obP{`Q`=QI8xq$b)q`m3MMK?5?@Sn&eha4w)>2Pdpv zA0O_-;@Cwshc$yvL%+9tx@pGyD>>_e4!BIR!pqxg53RFvti(I=jxl+vSq0gWE}W3)|Fe4prq>~D zsP{zc^x6fLvz7S)a{?r07w(K9;v}r<8>bIDEmn+;7mX`ke|G4*`Yv6Kqv6seBRWIM z5?)9986VVNH-8pBjL|UOPlT}GQ0t4j`A5zKgBa}3zgAg_4FeVSg_tX?>1uCS7&ocR zb+nT|Ha$L_%OMGz)o9WahOJvCl^lG%NG8wP>onD>?7x-mAVol%_5#sHGiq3!7?nZG z_x_IWAQI=GIvv(bIRzD`eHK5@BnWi}6f&?t@;}z7>@bo+Em*lXOI6manG`W5Ec{d< z7=EX2N*k>Geq+RdXK_MDSc7N-hfjA=DCd*DE`i3WJ(4ZKxPJ$ee5DzLbAB@H8i?@q zS17UyOquX4eCYT<<#N_K{h7ceNb*}CuGi5^kqpNkjh8MlG3 zdc>L~+VIxAL@L@PBD5Lvms#qoEApRYNZR{t4n-o?3Q>&YtFV^tQ{hxBtNu0-tDIR9 zR>)Pj&7m967LdwAGL`BUiNvA}7sMi;8Kp(gQ7QKo!N7cYXK`jZ$og36KloS zr6@xMZFbNB@>?ep-zryDmbQ7F1!B`Q?J|xUsm3xS$)c<}Y`_8W30!AFn{|<71^AIU z<~pRXi~(OeL@*AuHw=z;xJwI_5-aCf`uYqFEiRw zm4{D4KAR?o-z3>nw(MCHz#&L-z|RiO-BA~cd0Pq#z1!v>&S%m1&0Q1UF?(>g^jf-# zwOLy9Wyun30g1~IBILOum1{tlZNYRx1|2mf!bP{$@4C;P_~!W-=b8otrgi)J+?bMq ze{Ia$`QkwJ^bxo+k|n&rPP0r4bvj*n;_jaS(rHy{%8j;|Q(iO$yQ}ie&1E80R4QEZ zH?fup7tI2nJ-ZCjG8zCqOd~bPj0s0}aFLmWs)lMvpxo}4p2v0EaI?19K0BUbfB76+ z^RjmH1)0mX-g)Ps=tpaN*IBF~$SSzQvNPVauLZ&_;m+E=vw8n9MnL4Nf=cPv-rZ~4 z$ZIeB`?!gH#efh|*#raTTZJtSq8DL{SgT5}W3?ma=U&g7s;sUgr{sEl=^FO7wJebR z^Zk2`bZRpXNGn1x;a~7rS!rWXVQbBKnC}ZQ=pVvRmox zjI#f90H)5_`GT+u=RZ?>R~37$Nf=CHqp&<9J-K#qkqmOzt3$(A2n1< zo^YhkPT%<#3Tt0kDEityb>z9v;%KNDzM6z{JXh5IG0R{!Z~g<R(Mm|~R>kGXE{gtuT>En9!%*x8jsh{bDDFsWHr_xXrs<0WMnJ`CY zV#HtF6LTXg!$n-2qeTYQ5zxQ92+xNe*Z5L$tP;_Ohy*lW za+joodq^0TWJZt~C4V@u6QL~wWpNNL8iL`o_Wqe zQ@iy>1-+fct9p%1W>=b#ujO>ouCE_f+0%hwE_1lml9h(qSNMZ$Nrp7P@uHbKEs;i*=qW_*{E^GmS_MhZa0&oY8a;ZX_7n z)%!}TuBc8wowd4TaMiE|!uxU$tQN^YSmeP?o6*6%tX7ou(Dj8>mey1{zd?sp|2N?Q z)g_+If~Kr)No_UQtx=gHMolej9osVN{XX?Wj@aDW(>!;pUm-S1WvdCJj8|M@jM&%J zgtJT@x1WP%bcCIV3M53qIyAPK73w(UOhG~YV1}jITx>GOQ(2_7Mvy389`w!iMbv1d zqpFD4qXyBqt}@4KL}Sv$xmvhh8R;Gn!~(_FcSz9KCtiETdrX#_y3{gUx?~i|00QOotFr1r|Bi7sTv|9pibgDTUcGItQ6ocqo*8P;{w4=VUZZN^Cvs!Zx~! z%D36YMw>^Qs-#sp)MJb|=3=ZY7(^l&l#86`$TLvnDrI-@`qbrUE=g9j6)?cA2Zdp@ zz$Q<_?%$b-P&E0t*ge8*^HAPh#P@YmTE2=D_nSEK=d&DyI|Lj)jk=&Iu%61=Jms#f zGA^Te+hz04YI{ro>qbXJaJYy3^{*G-JRqdF(M(7dCxHG#Rv}v_(%cd(nkJH>EWJD0 z{D7@AzdK|u#)$YF9M7j|hx4rMj? zEu{QlS^mo`i}VN4mby=d>mN|VxT)%^DdF>0IOCS;Te~5GVsFU6Aw8$9TTqm3$NjCJ zc8pKQkV`Wc5k9P3j&I~*;WUJjen;0O7>TCR{WEIP7{I+1Cf4sewyez`+Q6@Uq@YCT zS(b3DSewz~v%HO(l3U*|@yp5mlzb4$L}9+IDj9;87i&Y!=o3*m=(_RzIcOZ^x|%qN z)IOqs>sT*q3Mo0Gidb*HYl@~{SNiMevv6XehdUnQclT9Y-)M?DQr;`POqL689wFTU z=GOM3>g~Q^_jv)&-ul&g<7czVhSFA;7zOt4!>vh4ucnl!Hu~1fhpt4ff@)M!CIIqy z%g@I&M|H^*<&_U0agqdeJ_jxOQ*PPkg44}*n5^$7I!a8-9ez8kl>9UvSpomKC83)8#Q8>+1_I<}mvLwtT9_>WQH`gpK)>W!f&lG|}vgI8cG5f;5 zh2>|O6NMxmP8@lzvMnFOSLyr|^toXcSJvTFeTCEm)P&r=%eC+8h*(EZlVs1zHm5t( zuqG1SPVA!i-Adu2j~-UYoGN!e!}c_n1f5NBm!B6vX`t%Kr2v~(;<+W)r%klsFVVZI|cX@(zWpp@Uwo}cEVWP#6C9yX0uiGB1r>N zyRMa@0Saw2Zh6l>`Pv%{qb^eF)a%sW}B7%ej(O0OZ8c->Fj#`_7MU)+Y# z%KGbIKFu4V)qkR}8~CP>=QJls>bUxgMr3xnIZbDkI3!vjngCUJ1E& zSR3bMK1UJ1(L*8HD)ncpyX4`5zdP$ybY#QTG6=q-OWiq$B&i`7g6Ktf7MyXMdCqlk z5a@M*JWOJkf1lkZ1>y4IN^Rs0hrjOOT;;z`wAzY@*;L2z{*a9dAx8o`Y^#o_W|ip`qA!UgT_|CT!iOrXaqF5hbT^tbQV=?0=GV zQ2M03>ln%pBE&;IoOtaahp7wapmn9E2*35dMli|WQrXUD(Ur)9!g}G~uE*r~zHR9( zw-_Q*8EYe0=323^TfQLwC4A?THw!$1v8TjKiLMoYNgiKDP#`kBy1+GpGiBq zsSjq=*tQt$%C3K*sM@;Uq}g@3osg=ANCreI5;MWE_yir>3yDehu<)C!#0(pHle_#b zzX=Pn7APhUqnM6-csxSD5#eI6#lDp*;}-?aH2P4SKHjBMY%ge$Tw|K@2d`pZGl5Qx zo`NUy9QovWph=GG=#@*rgm5!|Ev>K$J_A{3zc@OzG7vjmMcgb-U55_t#}+FStb2u2 z2QsVDPslU`L4T8o@B>>9DG%$EmqVIEQ{oX?n+b$f+VCY58~ic$y{1NR%|jS;AWP$p z9bcmKv++RY7_Fj|EwhJVkKFlII-^3Ok`8p(iX;5~C(Ty1;vkdPZX|MDKccZ+Wk!ce z^zFQ_l~mGMktS;}!C(!V44417%&c|0KZli^;rU|3@=<1J@Kpm*Kb1zB0uK-YLI=pB z){1;}*TI+mCgXj~9z&zy!bbK!+~}BP-kMK!7hoT|)0tI9j0FrtKmGY^sGT)+2n4j> z^ul2cA4lKPXe0@nZ^HO@iozpdw{#Ab5?%Wof>J?cn=W%~@X)@PYE zzTv&K?nXbV3TZh>(58lq0P>!gPP+rsNs>GzWf=3V!o4jf&y_8=2JK<}V!?xJKq+BP zm3n32&nQ*2%+pwfreaeWEmR+bldR~5Xn`bavu=lr6aH!D={(BnXQj7O|0jd45wQQ7 zc`YC&u*s5W?BN^C@=M_?$L*(n^1H^6x;`)<34OhIi~yXmb#aqJYeN25zF6r%VxDw- zGlP<@?@tkd#EKF(%&TYlH%VI~?q4EA1rs*IFOVmkSmAs`%kT+dq1Hx%Rcsi3Za0W5 zLV(8ADKn4C*VM}7-ha$9^W*n(+?uMt*Q=+NJqo1*{shIrwyXr{ zfC4Ue1fy~)m!o^{_gRzMJ&w{XHY;-`#)t+ULq~(mr5^a?L&GQf@$VHCLgsK_q7|Te z>;?p4aK-~poFonG{zbTBeQOhbEWUY_ZPBzGK-`1Lm;hGw$dTD6kJ81OlE##31ii%j z-JK61XhljuE&>$0L;h)5%eytZZHy2A4DQVBHjna|eQs@@Z0@ws;0WTrFRk#X%0%=& zZMuZYq^v+*L--`LMZB||1Rf{(;#K;Lf%J0_K0_s{67bue-5k5$<)%5?-Wc(HLb>Kt zrsGrn(w&}C`APn!a@txj|vB)V1WHNWOtppN`Kt=+m## zlLI$eTSL3euhYPTn?uI7S!iCmc=HH!QjZcG>(fL=g(q0lAfc)7S*QG(vd0E7KYNLg0iDk;95ZslxfA$Zb$g~ zbdzy1^q!Z;9~sj=^_sWyTU$Y5AF|#udc7O)5$cysmBJa~s|PGIQbuq)#V8&5(Di~eRZ7MtDL1% zhphn!p(PmP*7TvhMthv5zo`2q+khQq&LxfDm`&aUs0OR^iypMHw7TiM!1253bCAba zWDY9+NN=(O2OSI-#sB2o8aMW#TSn$qJIwb$v!vQfjCk5Kt~tj%xj@fNwZ}6^at0t6 zpVTh!+uMYgM9&)ct|v>k8X_BQf5cwXRZzJk7N0R5Vfm`|dknFOY7KFL@ZFvn+auqp*A4Ixa1V8t$kjE0iZ5 zdR&VK_wREW&XVa{#*gF}Jow+9gN9JCvU1+fC0g1)944&Xb{p3m+DOi49Wc+19hWzO zWPMS=A>Q-+UirJF!wtW-wk+43!&;kB?A{ihT4sAWW##+ef=tbt`GT_!kHKf2DjhX$ zGb*PK2+nZrQM}^u=vi&Emn`wYluxa!w!oHz`P6RB9;#tBurXx>$k461UTT3IcuW+cB)Oo$b!4$zCPh zJC%I7xmwx1oL@%1Lb;ls(ZzP%^|sYP-i`78B0|k4^!;^_>W~2+j^FB;O>NEJ@VhHv z&86y{Go4Ae%^rp3foeZ#RWZ?)_^BTT^S8YQH(AlIQg?J9Y(UWZeUcu~M>Mu483Nft zV||d<>Pue17_|T)1u~j=>uq1AwdxxOxdoGc{7{(Ii2DZC+~>8 z%i$L8pF;3R0XnxYBvr7WIoTvl>kb!~T9QlHDPVcb{D9^ER0_ zr40`oS}0O%QwKEk5U7XBUTRA~ceQznA z-{=xMSMT1Ku8lU|3|Yl(MIGO4r2~_bs#e2t0(}#Cum(!_B6T3iu^r*Yg8suM#Ym+QRdX zPvpNMH#eCp_wL-we_l+NqGAQl8;gfDm*YG!HLCYQg=Dv(7vUCw=|^B)cU)GzQBqb$-RxE;<>I+|; zmW{~{3EHwBL7-K3e(tw7vM{KsU=3pw9#Hy+~o`-jk+9LJ%vJvMy z)BK@c`oNWj6vIrKs|GJbQ@>}wyDfV(k4w{mD68VE30zp~6iT`(%-$GC!-e!E+P*5olqQw-N$f>z=YvQZ7*IJsL1wiy0#G1YVOLkJgTsHj09MlE+8pTmV z5$O+!SFWkm9d&{Yr=CPP&Ne>|mic7-*el3OtgBR~<;kf_=V)HHJ9qr8Zx`#@yDr0h z=QIt%uov*)I8DCd-(;%v)761fRKnrtoxbv=J{GPa?#lwOvc%zZt4xfmOYBn*&Re4! ze{sJ^+>t^8$5QzHmBR0MrMV)wK>aQGMrqhzTTkcR*R3xbY^f; zv&??D1_~w@5rbjv_bC02h6VLXReNv%PFBDx=qqItKAWlP-S8py@sI6MX0IM7FWA=A z%v^2FG-nz1-6+*~W4Ll3&viTxG!r-%F*3DfgsN|`Se1Cf1up{&a_=OsiR5qfI0uzY zxkF$6j@fp!ld_$fNPLcH+{Vyv$ZiqAj<`Ozi^8uC@n_UD=NGM;GHr$6Mw`3VX*rke z^fZdS#gJ8MG@>7zMRN}AK%|zpWF|!d^Wa;O_H5RWl341Su<R5Y9NdL@qFyo`)#^u!) z)&R4IW0kCqddX2fD(ovtv3_lZl6%#tYiI0V&hp@L5;$QLFYk{7G8c@bzQV&-fVLLa znM&^dA|IBENNfLLw(@M0$H<76C$v^a3ypb@JpcH1|6*|zO4vq{A zwYL8_hv9$GNo`wGQ`D61Hey})&AXU0bDQxXs0KbkWOK9TLhagO9H1s$%|JXpf65^h zn{M=cVyrA)sl6(6x*Z@(R@%Lo5XQAa8wQt#U~FesiGx=b@W}s>jLVfaR={~sGHe(WCuO3ha==WnCMW&i*QYVLB|wSQQ}w;rPk#&M zg=OZ8c#TtK?2PRPk}Y}vi20dYx+J(5_GlIM;aY_cDO0O0D_W+Bcn)G3u9VV41Uma` zj$K{V%>tJ%?o#QPJ^cz#ZjjsMC<1tFyZC^{AqWHc$(Az)rzrGia+r7qvY;M2bKG<|%; z^-j6oo%oW(fl3X4TqJXkbml;L=Wm>e6@sM*t|ZR4-P+Q#WKF%}QGZkX5RR^xRl{x^ zC(41|nya14to&1gFET08hcEYw`rW z6vorPt)KW-TTZ$=B@v}?H^A6?B^`z`z5h43iE0zt>`LOIWDAaq31j`EPfUew7Yz|a zFk)l9*|;nn)eD1adt#Z!8~%Rnxz8s*_3P056ib=p&q_wE<&#xroA(pZL$PF8x#+;L zL_5`(CZql5Bqn7PPb{ewc&-EJvfi(eBZloce61n$flo+695g&ZdM zw%oC8ekL(*FZ%iyQhmao7~-b-+5mG7q9#2fSoR4@zYGUW@0z1<&4*Ta@Sv`aAUK*r zYZc_*Xu`)vr8E=l7Vi^c`+Rui;z=SEb}(%rRfCB~4q}IIKe8&Q9iL^?lz+LIV)I_J z=#|5JAB(eO#IxbC1t&~PoD%aGMdxZR+lb@H`}qg6ine&Ay0Ap^c4K=t zzL}qXJO8h+h}KdpFe;JwWH zJuM2gwm*L#GZw39?uwOhHt8blCraP&YF@p#YFxXrfHWFyK{Fe;rDvRtlz{xX%7!l@|JhG1>oa=K-%Bh0jl{HGp2BkGjk z#0>Xbn_v5J=A_r+!}B}+70Fm8U7&UDtSZq^r%o5pBO1WK>&uBIYpa)FuKha@1@YX9 z0bgn1Nqsh8@hlURfcr_w8!4PgjdjV8zf)==y0qH9t(#H-#shpDNe2EETV`Ya^hGIF zPVx-9mai_ZH-kbrp7Yz`-=iKoKzk>m@vi+oNy7AogY%ds!h^QvCY^u>Q4j1}(+~@K zpn!{l_c4GaVX^mRzGE(h&mfEHLFGtd!VHuuB!sg}@^X{&_GOZW8U2?DN3hp=Dq5s3 z5lO_1^%ihSd!=gJC3NDFMxZ;>$=Nr9NfI%s%MMzxHhlQpTy85Xz&YjegylCtB(Wiwe_$Z4e2{MyO7chHK@=x$sVecdYq+ ze)^b+^u5Yd!J9Nqe3Nh|G8(Xh?!*J8w9lkg7GT&0*WF-pijz+bBo9)$iQI_aDEG3} z-9NspC5)HR0_J4ag9x&EOo0s(FwQ-d?miGZwiK1KiAT7&1V8cGJ#6n96p;;+IPJEN zbO$cn3p%UcEnuc8)W-WBV%aE`ym)$#KX906*A1*7cB%^sPf?#h*;jFgjG;uU{H6b3 zM*DyHvd#fZu#M5W_iA1(g}l0gPQ35_>?0jA5xj3Ww^P()-5;uC2FN_TiMbQk8t}7A zq#K6!{>n_@3V3JwfsLGh{q)VEjelFq3N`GXC^=h#q^s|SJ0R>RwbX~J>rjpMNjck% zA4X+8WlVs`VXsmE%&?GkvX(9@@(CzCaS1&Ad^EdRHQUD=o8~s7{R_ZU7IdieXD4L| z$J@OW6APi27feD&S^#FK7!5)A5Rkhbh+YY6AO{XzuT*@V^HWljqH?%?0aD7PIL(v3M6 zyMXYpx3_odu(ucCWCO9>{(rdx;{y2;>;INJIi|Sf*@_LQtxWp8F5+OY+O5vBZrQTZ zeI(m0RcgWhvS0`vnhg5e)Cu^%^p%5vn=C;20VbzVL)`O-!((R49w!&e zB7Pip?A4&g^|{{pt}u8_#}Md^v|_+`bq;D#hJK6gG{Ieu@Q3kK*6tic+3i4%?3Cvf zK*h?r&I&Br$u<}Mvk(~j7r?#|l->Tk^@Qrb+j6GU?%P%BC@#l+(JBZh^2pnO*TkT2 zbQr;nCQOfp_l2h(p`Suezp(<6#ne#Xt9Bm8FjEi8Lq-!J@xUht!lz~6G5Y#b2LP3z z_+nr1OlXmhW9V7LI;t~kQ+ch4maqopC37AA18QR1V$q@hvE~8l#wBfy0pM{$JA?L- z(E><278;(L8U{Qc2&UE6$l^#{sSMPLwX4B86@@^c_C3$N4JXZ@BsK->@=`b zZ7slqeU6;{i}-egZd#5c81F3a@U9!mVGVbku4(Z0#)Bf$1c{+bjUfkk1!gx@ozm@` zmM&y6QWgAQkrtv{;RB-zBnSKhYAEZC#8Uq-Ueo>C3xX(CPk2{0G3tROse?zm`H-p<1##FIh-fgALp31S zz?^Nw#a{!$yv(DGrYh*fG>E(alBmE$Gj}i34c;L(S=C_5sKC~}WSjoh{}~inBDjhN zs&fAt3|cHu2+U5uMusJ+eHsGfoI50AWBw)73~UAzabp^4$i zS@WNBbUSi1P8%}m8&>3$WtgXm>KksnbX98B80muUyw=7RoE#7wltL)<+{G1iEeV)m zx5YQlnH_#tET{R+!{Qiu$7KbYU6RiH?kV~XbGe?WU&IfcA|lk&MsmiZP)po_=hpmB(`>o4*~}Y~Q!1qB60;8$&650*k+$9LtwkG8Oh0ofbYZMgQvKp~fKhdYq##mNl-@1T&) z*3yw@?9Knjb7HwHXnkiAv1h4E+TJFKySO;z`jzHhlX&nhwZ2|T@apkQB*?xTJ(>F# z-4P9_5(#n0lC~gHJT0sya1$IVXLt-Ngr~(FkosIti-H5du{p;VvUGf9_ z6|{Da@o~Q(b+-Y0f-bI}R(bc0X$lSwxXtef%35bVuw8BcA)!^t%f)x?4f|yn)tgre zWs`KD`hxPfCx`FK{1G_zyrJMYFYNie$*#X@>dN_Tt$<^mlFAE(jY3~Pe5eqs9x1%X zGQ%>$V%JP)U?D$J=LquF7+y75 zuBb3?cH+CLwe9YicC1?P&OMe9-jZk+r0_v@3pI?q+UE?x(C^kDBQ|=$QtfB^Lfp<9 zKsLX4W|Mr~^LO|3g*JCo8){AXe|(y)J;^2ZTLlQtqEp*t$-=;6AvE2Se->ZMc*hj4 zNw|G$Y_qa>955}krYLwdwq`LTOVUp$z84joFSbU+7SkJ8j_m6ouxQ*93NHzr(9TEo zwuJZUGO@%3DPgnxKet;ge2rQWHx_^Jd@}=u$j2xXEp-l3s6=<#{dK8R1+bQQln#Xr zK*HpwR9cn+WekUUeD2tgTF5o-(7x^PADQrSI`!sU-@z{(ex)xr(DkNUDV{{@nL}6V z7A11!#9_K7o();ilgsU_IhAW+Br%GNg(F1&oM;)49L1#z)L(ihQlmd5er(_>I1Hst ztsvr4p+ZOZeAhehyK1Jwa?|i zxZ@cxP;F^^Bau|Rh>YF-{>8}i{$2b0%T)s3=gq#Bnm$=*tmlWak=1WZ0A;T?KH-8X zPy}#Ac0eSw7?FJrUFX#`X*>j)8*JWiG$r!=j;g~I^H~T%`i_bz2wY~OC7*RPX#jV? zjcB&!4W=Y+Z3V3ORm)}^6x+U6`QHB#Bw$@wgN@dT9=(JLX4D(j@iEwP$*^<7tn;C%^AHWaem!kFv#a(OYSvH8zp`lCO@7bCM$@-hAsF+Ms1CCjML52rz*JMirPnp_qYBBQNm z*=+G6+2R&{q##kh+PhJ&&icg9?g(9aP!(WttXA9O>%|&iMmoFETaN^E4M^<%+D_3# z=~RO_*WbVuCA*(L8`B6^9e;KU$^Tr*S703~AeHq6J@Et6TwKoU z4wQpTgcQlt2NbnaH$Nl|Ze=sBxkVdcVB%x!+RU* zMmaoq+72|Wm8jFpZ-gMis9V#jxx2%bnU7DTzks(6GDjx7K`#+;u^$AT@4mV#?Jy@T zud_TnTO5>=i#lU;aUg9v z)Yfn4lce=(e}gJ)AX8swR=BYAquqTwGScfs=8SVcH(cVO$ymOJ;K&JpJt$&QZ zB+y~tlc1{cIlT}m{d*M(J=u%=BYO*+vy7{L9xW}_ALdzq!+D9On{UaGmEo8Gx^dZt z*mN(J*I!k`_`QF$;Uvn~9ZCL5{(5XM_~5U(bcU!s$ic*w+oSziKGEHahi-yDhdM{Q zM+I0x^wWRLu^mj6X-zkmZ&Vx!F4{oV9QQ0=bf#t>MP8O7k|0`EiZ4|4CqxU;^+wf-ME-1|^n8xvQ!3rDB={%-;a->IiP6Md9Q72x z69po*pP*p-my!ty{Y%H@)}qxYMo8GfYqDSpRTrT>)ko^@Y}rHzelJooL%e*hd%x;2 zFG1>ts+Vp|@2JDSETs)ged_$t(>esJW|G|KRxkkj5!;TdKt4K%6tOJ8oW-nZGOR>| z7OH!&KkqK&mAg9E&3}Dm#}8tgL!XYmh;&?TkCQ4V?Fjg`i;V}$35~k?hn2%6#$I?k zjWmnD{!mM3zHIlfv!eYZWif%G*QFhCh}lom=@C;ajNek<+bC|qed6r38sK|z*~{y) zKHs$;j;lMHc-rGRIuHhlg`>0>Zw)V@?1LncDXw)*<5inDW#EH-9qhRjzWynbFbf^C z4H%Qz>^~;>qV1!LXJiolbUi7j8=-R$JdQxAufW0l{$M_Rw`z3HHdk0<14Sos+i?8D z`pE&_g?RR6&dXhdk5}&9jWIavgn`?5f$0^Y21AFe@N#eDpB))gD=KqZw{>;9dj_pa z|1z1PHvLx|`{L7OeTSx=pd@GBE_K#*s4T^#se~HVP0)w<;wSdSpEowtCGv4+n)N6t zd>On5a$JRNhN5Qv0eOIVXSyHzkcMNAPBj=Rc8$DT9l80tZg#O`+r+jy#Pc`>)A`2Z z(pfLNNf?u}o^}(^pPBrq=&?=aIX5JlxL8Yr7&b3EscQZ%Zlb0hF4IV` z&gqGSN}AZ>-*23$F}&jz;=Qen>BjfHo(0;KoqO_wf74X!!V7DP<0`&aoiIl-Nud-u zC0wJ}{|Ix2CF}E-Qcrg}v_v%=EJ8+tR>r@Vy|1h~ZGK*!5CZbX*r(6?eHf1U(ZpIE zJr9<|doLwT6HJq|c!iNQ-7t3M*>y3tv_SmFtwmaob6_hMnA5_0#N}OJthp*F&jY)7 zW$9Tsh+(3k07ga!2ifUP|Lotgq*oi z#7@<|W|q*0U!oq#GiR=r7>;r&ULR~7@n%#rWvGr!PS!9YVEz<@BD1<85>nJW#x<9? zo#z&ZOV>gPx97D=4qnJMgpDT2U`(*LG2w4Bel}e#y!?gj_gT)*26|#bm;ZDg{eflt zL3dg(Wpy_DDckGAB&#a9`XV~-7?8X4acoa86^k!zI20anz1lc(WAp}h?Okc7p;Zgk z`S1YR8_NjcUh6q9iLDEq3>J~g^XUmTa~@78E1=oL3}~X zHJI^Im8fkC_-vo?e3u8MSQ0%A6DVYq&>t4B0fQx6+GA%xVNBsYxduIf(C9O6Cif0C z=4coGO`o^&Y)IeUUbec=7-_r_iddF{5Noy+S9sY`T4ZPz5pr550l46hM6gg`NX_Y4 z=vpJC`@MSd_Rp~`==#1)Q@jsm9sa_BrOe6Z9Kq}!Z*!X)jTObVD>o@eMJz|LRC#I) zyk86TdpE?HZh`BqO_~5*`qU)y9sZj7kah(LA^AM5MBODPtoZbD83@t8H0I_ft&~zs zUIlw*yo*Zt5Y;jtpQ&@5k6Yv8@{g$<;0_Ejg~B%f5#l%~~X`%mvA0k6$=d5mOce()xub8}SOPG0y=zMG)03AShr*tiMM}78BC+;yz7J z`}6x-$;+o38Z>&(vWc48CVcI9#tR6x@!^y|9hr)w(b80K-&DqmcD^FHduH(_exK&4 z0q9`!gzVLWSHOJtXsd|Q)Cm*NN{$(-pvv3E^_qA99*g3z5gShvz z>jX6%TyiUZt1{LsurR|&Cb2uVvRI7pA9Bhm*oqEjUtN@bj^cnVnvcqHm~4m~)KhFi z?D}M~navq|?2(w3rU_C9Z&C_2Uy*ldrm2M4{zAfC@#}dqL(!AUBAXpQ~%&B;_aJq?+Rk^C@Opk*OziaSF94x}pIXn%TAl8P4TYmSg9y`rf+nE_PZRpOy<$X(PmM+Za_j!gse&56 z!Hxyl3i^^e$Fa&MYM{QT6|=A!JUCU%m4P+T#?Q$gb5jMGI2R=+>kSbUS)2sLSiA>Y3! zLye3g!j4g`Z&AFa)7l{I!=@jFgMu@i+`@B&Idk|^yq@!4RG0W2s;J~Li4ItL61Ies z-^5XPiEq-IeTa?JCxuYzqbRE=EgZPcWub$7YlCy>)Y|dDIgY3gGqSG7%DSQz0C_|K z!Bi>`aj{%cw&n$z@iY1cr8CH3s}Z^(0)dl+-FoFcj;pvjt$uL#S+VU`>}naf0Yy?`>&C!YdlE?Vt?gobxX^<+#6lIGYO={8!ptq zJ7%9UFMvR1b!tmnvXqT}GzZ>KJ;sn{bX#9uJBMQ7_IYGKtZl4Xra5ji*K)gbcTTY7 z4(wt8LC{?AK-4JvL5VnqGEEoQ1%!4O2aX}s4M0yu=*FZSz&+XHJXJ3IbUW}|iBmXzeb(HCA?yYNbU_{C~)R^V9CeZSXneHWqCwP|=p#BwwjU1ma`wVaybJdLYB z^C9zk9WM;OcO|T?S~sM*$GazsoYnaHCHYL#uu7`4iC%5I)RU_m*gv^9Qst2AkFAO{ zL3b{Ft!ja^&CuF$-+`!nKN0pMnkTPE>H_qr;yTt;Tns(@E*Ap13ucUSt z3px#zuk_Eq!e~05Wib|jGA8#EwWfYdgjD=qvZkmfpS>()nbzvzX1I9m*`W@L+Rbc! zV}7qUS9S0n%D?H34=G{2V(`j*35sKn@?4eBoh#``MCE3~+Rs}LYX(4JuVRj>hFZF0*{ zlsS(-qY2^DnGL}vS(a@q3b(uzv~;v?7+-G4?-S+ak*(@^Jg<57hg@Mb3-{0HCy5I3 zfe_9g(HbZ|@^F6*M`H>Jz`xw=mA7bpRB!NkDE?dc#rZjF(YuJ%>yPSs);F?ZHQ?V= z8qn$B`aeKiYq5Y*-}Nz7#kqmv%T#WJepM4Z@nVhj$uHH4huN9eA72GuwS}GrqQ;%q z4W-t?AOKyo1>a?GJ3Q3|x!&`8)NHoze49r=wFY`Mt(!*rTxL9lKwT@j7UdZd6z&ro z>*Uk)+~UlaaPG=t3>y&Q?Q@%8*{84h7M_wCUWO)kQPOw%#c>rwkJiEGgmxU8Kh@S= z&ydc8J3i}X)$JDXPCL^ml4)VMJ@*w9r5kzsz4ZAQLqP@(vRu@z!N@`T^Ma9E&Y*k zDc;5Tf)wkNO3k^qC2lHNJccg2qjN69kCN{t{d^-EtA25MI8)B)P)D#Eb)A9L4r$f?{WQ_UZM4*ws|ViU{ij^Wzgj-Z;9}<&Rsx8Jqf!)>cjc7T6Fn zu@>sE+dSJE}p!02<*&LI+c!VInx{`jXQaigBK18#mYx zl1}u=+Wc7QJJS5pwI-WGxp@&7NW~)5Q-4yM15x4ScYotgx53J&%&{p|wUb#U(<=B{ z0#}@Jebl=%S;md-r)E0uZVS#1q`muXs2Kfa@gjASWE~$do!EZP?~*@+haSb22GxUc z=0)oDrdYL(Gy6Hk`!e|kA~iAQUayn1iPw2mVjcn03^qVYtw8BgJ;~FggswSBbrQNe zD$>3=uE!z!f>j81b0adcyn4IRFl6B|{J{Khc7Q+ANWAdkmsXcDa=})T4HB za5qB^3`U08;mAR$ZAPTI#;mT}{n~FCHDTz-vTXrc$Ib#pgP-e%R+Z*Cj=9~BS@|hU zj?+X{_3kBMO*h`2h=T+ozzr75PO0%dx_?n7o5|5Wt|hAF#O$l-Y>c{T(hT?JEF91% zw|~ILI`yx~53q1^brKqeuK(#>^29n&*vZc?43$Dn!-YPMG^~{k1&kyWHY2AW87oPD z5!F7a);t7Q0b%6p-+!&^?TDlAOX@qBqX?xL@BJuiwM*tQ4t4Vlmg74wcGs2P%a)<^O&lYqGKpUTHivQiQoo$~3 z=levyvX60!Rr_9i@gA>RP#|lzGFzZM+y=N=3ZZvp$L@|hfWz7sA-9yZXf6+t+*Lk zW3>E!G~|l#eDPp6OaB%1^6@v64I_pgP8x)zUZ4h%rm(~Us%yJu-M>^fvgcx7qr;JX zb)H<#`Mf=ieqAR*{g+Y=)BO0GH;?&J!w!Tgz4`Uzo!;F3-V_5k$+Ww5%Vjg-8Tnb` zXz{p~h&#s3Ab6tXn}yh%i{>NQKKqq`3h6pNP9RrerV4ncYG|_Ry{&HAs_L%P>8d4U zO;3_vxf|4J9#*;K;oT%G@WmsISRcn<6kziK)FHT!O!UOBLUm}zqX)C1`@nTI7(>w^ z(n*RVU79Df{DXGBiqvn*Fs+USzD#58_J?-%v5oWbHlbMW!jmtbV_TOXX}ZZAy+CN* zm90;m5Cp8YO?o5n%j5x&4W>}$AEv>*iG78tZFSy5_oTdUFR)(5GJwK{|E|nNV#5Yo zW1u_}ZN?;3=WgSXJ(tine0%G2;}exi8s9gOtXLpW8qUF}M7dCGWm14yF#fdv=tn7s z&m`&(a%0Mu@5@=z{sK=rPpBMRSUdaOKZu<<5Vm-kVwFe2rZDBCAX5!_i8~ELVqEGU zk)GF@t?YbN2K9z@r5qxgWrNRZUO#45$OZoBh}mf~py|yGhXZ15^iq%5uX+;CC9~GJ z%|kJ(f~fNO1wyBc=}R~FT#Zo)k=f*Xf*l9z0e(OGDw^|H%h&xT{)2~Lr?Gve!G?{C zl5>D02W_1wnwz0^cSk^5CG%b(5< zMSL_s%{#V)D>FuK@+qt&gD7v5|1~KUb}AUauE=-mMW)Tr43-kN5Ng1#Rc+UTUvnEy zXg5HL_jWU74K>L5<*m#pt;lfvp0L&k94hsZBAW-OquROg^^ZK70MjTY&vl4I0C^M`%qr7P9OW1fE}C1Iqdcse9#6^2Z? zRu_Lw&LV|cWWxr%8~=cf$+1di{|7Y0OEm&Es8E2M)8`L|%v=KayXtRz$S|To!{8{1 zoSvWdH_?{`c>B(^E@PkG_nM5pfKng7FUnZxPp^RtQ^{4zm*RV| zWwE?(YeWH+e7OM30hHu4V{$7JeCLtVBJz@L3;A31YK6{>SJHjMv6GUIc(t|x_RZ6I z<~Ou}zS4&L(`W-^XQ)G@29n6P*%p zR1iNy13B7?9(@$tes05tSz7|PEBl=Vx-k^%baCJoaY=B^`6)VqP|^@oHu(1rf)+aK zduRy#Om1Dtj%(7Jp-OGo-{_|~hY_yPIckCxii00R^ zw-L9@@VRNb56^Szps&M&fPrNPR}NuG%utvBSQYel^ez1Sb8CE>XFmkU0dr=I218zIl6?c<=vv|ldeI_|KI1e{K>KyuO$|#a>pY?Y&>H|p zS7V@`X2)^q9}uefeaH@Q`#Yi)LlL(-4f!Wtwd7SvEqb7LinYG^!#?(RO1h|qk%55N zba`G^C@-B<$a~dV?Dyt_mg?N-96$t5Geg0O`wS7ct%3grxRqjmg@zBLsK5w)ZvBsC z6IG8~f`X9$blT>%`qE|ZeVq3f!_6vwzTtA0*fg$j>CL;t#`S|5YFJIj#eYDW5F*DB z6=DscnZy{J-yofg^NlZ_YDk=n$o`ym_2`c)5IeYqx-!}nRo}nJSx*T6vp?rV`8(h` z1{~NEee6bO)^b9Zp!*1x&94JXk8{xt+C-CcOy&+C@k^(wLicu2IzCbmh2N~=whTP; zxbIj)Sb-Q*fbga0c}|~L#05I{Y~>O)oVTO-`y4o_U3BW@YnGy$M{k1)8y$y<(56D9 z_ziNd(prRJPU#2oiIU0(Pk@W~#CfKWmkaPJKw`G}@%6#z6tN#KzgR+u*u)YC;1S9@ zCvud1tN^?fLQ;(oLa;4c_gUJUI-X;e77w;L`7eOfm2Ob1&VZVc_`WoBMYVqfK8=avW*=*C!YRbUa`A zOL_zKq{mfxm$fK&zqU<^xdNMU7w(G? z#xl=`r)bd4D5Y=TyIhcnN*?si@-_X?rW9S+joca?0kE6IeWY9E= zv^K+yoqLvk-|4*BVL|BJr~DsTZvWG>XiOenJt6pwZcS|>;$x%vTPSQPxGnka2+3E+ zT()kmsTMluD{XtQcr)=Pop+Dg#=FiDhi{4_DFYr$%F=u=9Mluo`ix8LJqC#6Kh z6oYD_-{^jI`7Fl}LKlBJF^%-%7;DyWlyUoKcW-gZl6f5D<4FJ*)J~Nqg)l2`d8N#19bAL8?);Wp?0ubk$8YM zA3XHXv3ZZCe`~D3N~mq{01I%$qz?#G#R@uvk`RXqr$~22pS4!|HO8ADMjx$}D6YMw zdPXMs!GO|bC4l$4H;mCgT|||(cCi#T9}kYS2eAhr>=rMuS-${DIOJrzRQTj$VjxoZ zWh}TBpNe13z~f+j$}o16A$5EO`vlLgNz>aBav73Z>~|d7sakzuTuGj{<{Y5Cnq!>1 zj@g}7C`)FD0&(~cluBY6eshPCX&lsYzl}T(De^B&-nSN_E}4=~y>7}qs4Ds0Xsr+* zsJ^|@nD(Z99rNhq%J-9i+Ww{9v^jL^EZ?0~!{;od3fgTd#dL^vg$}h{5-Y$sVd)jO z!3Vw5m2-%l4s_a7dr3e;ULpQ)#pV&31=>p75%G&w&ZY!^wzi(Y*RR4|-ohVcMdfMd@ytXpo|%2My}Q(&Zd!((cb9}N0WC*JH5PIeWR6)Ym9b%%hK;@htZan zfi8p6WiMoWBt>l4KFfUg2Ne1S%DJdQ-6vh8n$t1Ezi-U?dAr!voGU8`+tAEm?K!UZA3w1}^k%Fa#L%NkL+_iALWKGXl* z)DsD$$Zg(bxM()~N8#=GGkfjk8(`K5<6 zpEyYq^3j39sRW@U6}2 z>xXj3ieziAiorM__g&nC`-Fg|=K$ywxTHxpuD%S(qeW%sN^RHrg+}J;iBNX4?Z&@= z{&;7xN=@3#={CTzR#&!e+niL#R&VYt9p4ZZd)nA|z&A52`>jc>g8L!+OzV}P=L)Ww z)*v5!#~(Ww$Z{DWXA{r{j!!ZQ?vod5%f100?i_H>_@Qf$33pW+tHfVcZcFQg31^t~ zX~udf`f%4>JLi!1^L`p0nlJkt)eUjRAVDVg@H&Bd1(cnuU@<5iNyQa07jq9gliIE} zDUw!7sr4O4ZMrgYTo9gqbx?|tEf!xBv{Ybd6^x;L#?Gvql~%mq=6vJm1Eah+OUQq;_g7Yn?N9cc z$gfFmj@@Z4-R&a4b8li!-dPUoQ2_L#!C_3hd;>6P5FD|<^SjLy9}IMwwGi>+deZV)z6X!9PSzSG4EN{rXOYMA9yhew-0b0%-+h1d8SYL2%V z5ifb9M3EZ@&C{tf{?oVh1nh1j{3YgZ`U862hfh?03@yYVfmIq^cCn_Ultw*WzpwQJYD28xopnicjCUi$22o|zb0 z@#eBXx|xHl=~(pjodr6y;ip>z=u@@$iX$-ERVN5vOR%JbV&NaUJ=F;TJ}0(z8k+mf zw*rg2atZDPGdGFH-%lww7kkD;Mvkz>p!6yIDG;BUWCElc5B8syQXE0qj2SI2iC7Bs zhv`#_ZW5p+A62|J227EG{C9w3Au)Geuv+enGCHALv=CRLs^3eeQ1YHX2S`8!m3FOp zO!~N}y9Lj6p6drcPPIzcH@&&r;(h=6?CR{0hc8w>A8FBe2ej6{FSg(RsXhN`P5V(WkUg}9E_-|bj(S{I9=JlR(DiNBxpSliVl|f%C8TW z4(Q}Y%tcZV2i=vNqS&onw5)1JVjq6{sR4t8fGPR?X{dy^%a&1Z&Zxhz z&P63Rg(eJFqKypritnq5uOd5O)87!Bd^cF=gf6TH#Ddm?klXmSj9#paqBxqJ3Ij3dGD0uyN76II?h(Wn<>K1d>z=Lb4X~ zJ=o8+>)rPE#YdOq?6=)%SdLBD5C4{3abxMifW|XY( za<0)L7-f!4Dd59#AV^b0oTi_??Mk&@!9N}I15(3#&2%t1tl#h~1(Q`ePxV`}ZNWMv z+({=*!8=3Gyv(zIm1+F+{&_YN>4!8PbLo8LFSww&rR6}Zp7CQ!3K8{K{v?EYQzh5f zFz?3G)j7e;C9*9n{^)6yR6@uKuh2Kc-{jTU{WosldGjED%F6slsXdnM=Zs9a!@?3gYpY0Yqkq+Y@1=$@EKoNa^1|y=U43*Ju*McP+)2%juxK zuZ58Yd%>HQm+RuiMSm7P;||tj*%o&6^EsYne%|7~N~cm%{|MI217gsv+H3*d8F07Z#by_5lJinZ zw7RR}@lMj`tLDWQ567y;Bn>`>11IQ!7i;{+f>Yd1-@<(VKEkeaj)bPY;@>TdQZ_{7Uj#Ao}L z^_ul96=wbS;o$0bo30@p>yhK{|J_v*`#_;Mx&W%c+DV5lksHuKc=k2-CMUWwd2%|b zdP3ViXkR{(OQ`>SW1KzM+_WmPEu3QrslEyMiMk$?1U|!=9h3-FX|Xgk`Hrq@=w`g~ z6^877HG&(At<=mp{mDK2`rkv-{wD}qMNU>ib~4D=4aYP2RJe94SLB!Nh}qlPzY}{l zY-}X-Yyz<+&27$!8o3;Gw^*k6^GH=7*XfBjl-JXjAoSDY%aYyo=48k+J$1nXdCAHH zy~`6Rc!q_7--6TF=)7dfovs4@yL3^iwU1v3AOl*~O_1Wc#3E$3+Djz+GBdusQ_)CC zp@m?oE8Wcm(i zVNM=uc(B$2Ehy{e>oP?IWGM08D`asN4+jinR_|iOJZ=-7D?le2Y*>r|7}5Xv19hZ% zqpT(k`I$q8YEzm55Tac6P;d=z5$}tAmsz{-9a6gDCY z_N(Kxrh8O9ROlEz1?HfqIQF0>NPq|-j1MryK>O-(5bnxoE|ffg&e>ZSDnmM&$**xo!U)IqW}vrZnkHIu023h^$GmkLA*wCp8&TeUQ%&qaI~b z)F)p*xhnSDF=3;CYzsJqwEh6dUyCkX9O<<HLzo_Vs1^F zCn|VqyG#E3S(7_#gBQ=)8eQT$ld-Eq=VN7Z))JSf3fPWjeMusahN~Nj=KQUzmi8VV z@hSH5{@q)4H!zxM3r`dgpPQtrmdcOxdhrcI(^&eI_r8Y^kl`g{oXKQWaeZ53p5<5} zW>=K2IHSdNsqbdp#p8S@c~I^^Pv*wR(02j;&Kqsl^bN&>nCkw{kTm_kqp}3q^uzAq zheFBOl&VS{*?r@l(bjoo&vP+61utV*j@}w0k&J46r=mlA{czt*|08gBohRAtPQe(4 zflGgl1`wG!E1@~*#Yh*L5tw>H& zh^)g7fPMV;6X{v9R=);>Y(Y=cR0F96lqS+@PZ?wD%|Ts0_ee><8^rX_`Im7!A1upL z(?2BrR&HXH8(zZB(hedi38~Ug-2hl-K;{hLu&vF#ZVIGf=;z~G#M=DOF8Uk#B#BYb z4x$=$rd({v>u$NP-Q;Mx1D)Smc&HcAP_LOO5H34VcdHuG{)ds=Z)WOC0N!cyQ7o|E{2sQe~Y9%M{9+ z@wN?2gZLhV<*QAr>;+GF(+tv@fgwwo_N5<$xf@klJ)c$wlSFF?ZTp7WC>XUrAZ;0z zpdHOM_RF*0iJI_W#MK<^cYdtO!s^;a-7mN~Xg%ijsk~*M%@QJTbhGnNe`2+48zl%E zV%F#%tv2;_-haBiwg@CZSkkY;we)koe)dWhTfPa*7j2mXP$kO5mJYUE6Lo#6DC}D< z46~@t>6dK4U&S`p0dPU~Kh$(D&59pHju@U_^wSM`8Vz1=*BPsN*WPAnxiW>DbCUJu zw^UxxY*I?M?P}!{skldm|K_Aq85K-jOEQ4P6Q+#!irs2SIOxkE^L8is5<*|LlngcW z_vrR`!|Lnb)pZ|yk_Gat={l$W0jb;Yw(l|Ic+P*tu$?!10K=b{`hkVu7;wcs3;=iac@g#ZAEVxp|@}{^dWD%MR5@6zVFeR!+xXVz5Je&-v z`>|UCsnar3Z~wA-Rwe(oc6TT7BeLdM^l8)s%4$EF&`M&9Lm?=f!(A=v7&_bXu#kVW zP@H+~r>0Dc_tkT5A3ekT)pCcwCNH>NzDuG7AX+XA^7k^*(E;kQ1q}&XQB`!S5!{B$ z6@6v$_TjCuhBU~{PmNe(@>x*X!!oQZ4NAG3KCc(!N9@#)pDE$rh{Rh7)?p$b#Es&Gmc(JBp2 zPE8S&%enj53zgyQL~qvNU$^UhFj_5neDr3A(l*Qj=(E%5wom>6-3Q!A(W^ZfSP7&$ z;CzUT3iZN8maEiYnu-_OV$VXx*q`eE^jvuE@cqIAmV+NY-`zT~u2ITEXx}g@B*@E$ zx7W{5tRL~dHhBsnlqZ<}6Qdz5tbg(6qmvO>f9m4lK^OIHIdZPE9X!UAb2TSM!B;xT zQmLh#Hb$a*#Dbpg0ZBMwV8QZT9K7bl4AR;XH{vtNB0N6R zj}SAGnFV}5S`JrOvqplYa95?c;Is#`RL*rh^^|;}sdoMa^Ygt@W#uhUs{hu@<$%e1 z-}rtkD=l*QiF_m^qvTe9+ zti+an|DKVuBJp|bHMX;HFg+@MxxJPqVj4mketMuoNQgzuR<~UvlNM5+HC$Pcok}jo z*3B0VMrE)!y*~CK#|*H_2`%ed&t8q7kA5=++|&`W8=&0dH5c3u@Bi zL0Y^`bA1upX*HWa-RaktrLIdv?mvKlqgmE~44HC}XLxwIFK}WuaHecZ)mnP(x|ra) zvXX1Zx5{SDZ>i~35{cXCm8oB^>PzOsxb6K3SEBkxlM14^*ty|Y8$(vG!samZaUydwoIXyGL zog*y!GafN!t)VEw|KNkY>phzKRu0V&8a)NBo5r?_65!cPaaf|5)B0}@#5(v_-XoFje7(0vlCk)Tv6 z{`+mF+0o+Swhyr3G8bic|5N442Nv?r3J1zjr?Rz`2%T z92;)G6ob-q7ufaGC zEMyfWC1Cx77uBhJ(nBg67v=* zph3>eo-UHa5?(K7@ZR`b4??3i#Q=grcXXaMn2BR7`+blX^PWG!Pj-9iOzK)mu_ssd zG8VItIYAU<9YsUiTND|$LBT<&-o!xZI{fxMXa5*XtOV0Dd1vPxy!Kz@iH%3SPxJQ-lS~>D=-c&4=@aTm zDK+Z2R2n1Qr?-}oj9JLF0$-4*xJr>OI|7`CK}-&?j0F|zFLGx2D2yy-muQ zK7~Fitv?dGHq4KMoLM8G0dxV2C#?)k+*JUI=#SQ^!Z{M4mInC&{ZZc$dc2X4J-I*X zEzovP>xD&})1vXo9BW?#K2x@Bn*8oQU{(gSNghRr%IJ19ZiJsspIE=qTpMC>!v?}t z)S50UyfyGbomX=ZfBvZQLgXDLQ7ZgAS^#vq1cOaIzatuAUjrlseT@DzWJo44RquIo z))-JA&(Mp9ad1U6Vz+3lDY%U3Upomylz9uF$R7S^ zIbOZe&j>km+4e+>hzbZ%C;Unq=e&ElKu5gr$(d8DP`cHj7FC|sN+*7SbAgLM!v(U0 zhqD~9c`Up&NaA^zBJ!`r)B(!Z*2g~5PB)@W(*K_0?vtPASPOi(82?*uL7pj~d$8c% zfqUNg0>KvzdcGrA&4shRxkyr6!7bBuj+Gex+Jn7cjBUkI3DFwLGRfUhHddzc5$qKriopdf_)bfGCep#SUjWeoIG$< z-I1(O7yqL6lEXUN(G7V6iAG;sP?&$9>w}(P1EI1ZBdf!?Oq!D&4aJD|iy+qQ#lvp8 z9GmkN*2K6Em)pD3W^&uhn?ajHZDTsPURtnOxgxr_xBki*!WbiVf}58?N=C%Et&@D) zKd4yUtv4x+M||)lsJg*GnMq#qlS~DUVT3y(a)`+Wh)@tm+aFFQQJOD_>-;2dk_QA& zS3t*qylzSismcC%Uf|~+HjzcM{|(&0^>6uD|F7>S)V|v#0JvQNIw(Y}&=@?H`xq4e z_!eHaC;?QheB^1jq$>TrI2`7*LT9g@?GLd5n-?{-!R~Xwry*cdgoyxhUBttep!}Yx z0Oq#qpsDJECQ{IYAT!bKS`CaBG*(KOpiJG2VCmM`4@j-I>*nQFk~mmjc|bJcPvu|E zjGLK)EQ!E;2n3XkVcV6@bMJ1?YtrLb(o{>%bzO6lFxV>1RvK&dJ!n9u+y}DD1VpG> zx2u3YI$_Q2^j1cvE!^us@X}A~4I}SKAx-RL`F$Yc3z*kP+1g1GHyzJ-i|o^0<_r@R zV=F3P-lOAU?>bz7SyD(Vqyo~K6&Pu6ljKz#AJA)LXH|7?O@;3do8zxJEDr4EKp?%kQOeGLl$stkN>beN{a!7WBCv>T-S_#b^0sYQ5Tyv z$F+;x>vtyOFrLI1spCdIE}-ITB+baQY0~Em*x5ZZQOb?kWd{WO_Taosg*ySD=Iwer zmKk-Yt{Vf$Z$>a}7Layy)fb^$3pfe18$$!!G)~M*5#q)5R>oh;tcr2Smh}uC@6l1a z-g&*=*y=RSAXjvsuiJ6Iap}YVLcym7uRkODmWy;;Dhn3@ z(Ss-vQ~6`cO{c<&+PVqyW9btTP$yuoq6TF5lUx|1ltkUAmdtsHaXcw2pgiPCN=h+( zxA-LNKu7Z~e^F#Xgs^>Kyy4fL!hIdAIy>b9!+Nne`ThOja6zqN&QEl-8xkMq8lm}$1i3CL@_d)Lv)f)Vi%Bt^xC)^lnSj`FFoQ+{Li!9b3#8` z&yTUUF@+A_G@UxoU!4?YLFfY+Z*xY(ww4sWWi%$7#6Es}*0VS~e7mB5(`o1qSQcvD zu#>7?AXbT@;wGhFK(l-%nO>vuu2D}D#0;P#~C+`wWg8~PkD^r{CJ5Y8s!V)~y=j7Y> z^*>Inik*0)8<^C1`~shWShpDD?niFQ6>;@{D^UM$#p{3hw`qY#F2Aiy@*%lI&l6IM z;Im05i*M@%yAMxHC_!Tzotg}@56lzf^c~f$%$)3>o81*oAN!i=7SE=?ZZJU$H7#-^ z+s`8B?dRJ-;})NZOgvS~>g!7T|I9u+>9tRoH17^CbReZ!DLG) z`ghJVEnP2WL;1*qm;1v)>M??KetMr^rD}Ih)JygLf1RQK>x{L_VmcmHgiA7yPnHK@ zz&z;dRg;^qzSRz&ac`MW6*(B1`cnB=Vb7!|A7Wl;cVcL%=`1KvgN9y<)p{1rgRb@t z=HbCC>aBz-dn`_Vy{Er*C^l?y4h`LX!DtK4F2=$on22a2*>ad3rFSQ%-fm`p%Y*8& zQw;?&;hc8-*I2O0}Lpy`6KVC+^cVHTxg%^@d)#PV~ zxLp?^;0p}ZUgydHQS~wVys8&8pr;&WK%Hh-k5Y67>zF@X$~0RK5;rf!;EbPA%8#qf zC0ka@WHB#0XDtjyD0hVA~VFA_uC*me-yEyqG^TZmBS^UL>~$&{Gbub*~~KZrm$0xm3G zP^ZXw2-tvybO4>!QY_z2#OiW)%n5&+UTHaxr@MahFGKdls+3napHkP$!ivk>GCk32 zd#Dc5q{0NK(3nZvA&n%c)5!e`A41;G7tMW6qeP@$vP)(%$btvBSV!f$L?5P}W@V9K zKsZI#H#)pFP%-< zQ-Bw!ud&g4G2Y-n>2J8-^yDIz$;b)ja!03CX zAn=&78vV8yX5JX}ujPS_Xn0mqOJT z95kj2{@!o5=8787VZw{z^_X-_+Uac0!@2Oz-kF`f{;AzZ>`1@#XTFag^HVz(*sn-w zfiF(3Gk{)^-U+)-yF5wKCAKMz|siSVk!LX&*Zmb-L7dNCxHG6Gi%RUTK7(YE(# zvT>1V&?oQ&xlbid233S!9omL){b6e}2&?ea)P-b7v|H!xYL_t^j>VD(+<0me6u7nZ zqA#bIQdMKJAy}G}zPG_}pvV2M)y~s~k1F8Fg?t_yb|w=WI|mx|RgHwEI^Wb*#h#dz z+bOo~;X=QbI}uZ*yMFZW)S2bj7ARAdp44bkvfpG&nBgd`V!zrL+d<0u#70hiY9nd4 z>qdt>ucfJ)423I&8qqbY%0Gmu3_xP*M5 z=5zQ7~Gevt-FM~>8A5#3WU^0sf?}dlZ4)$P-%itm9KW# z&S)h*kf-nLe2W3jBJoMS;;ef0h~%pp8*y@LY=QrcYVm)ZbpAv!xcUm}2#h_9s>){( zD4AAjeB8>B)Z97$)?X=>La2+oGy)oTOHxYuE}xv8bVcd2rl00kf(FPqB6rhcs=^o` z-xe-YFIGESdbM=vN1-RudqS#qydrNhw6HYqX<9U_=XF9tg7~Jc}y&rlda_?0u1kVER{L@pG`y zFC+@6f@pSdU@RG^v`JIt>Z3Nhd_nNxm3hqx4fWnrm7ClBiaV!aqmUWDvsKCVBSwY? z0T+_09B|t&3=$vBhD@GX9DF8*>cB_=80T@-rmN?Gz>cBSn`@7{i{`#n7Y_CNKRDTY zKiYnH(d~!B)s2hn43SPK$#@Y|USbkO^0+{lSyt0kN|?x%meGlQea!(9RcL%R6 zgN3cGo`F8Q1*xq2K?8lKc@Mg3J*J&{?;lK&qAeQktw&Bo`W8b&et?94P;d;#$vXVV zItKK0;l$y}tDvT!>1ZnYCGcpmLwH8Bm~#dZweX6jsq53UcM7U#aox@VwtL4V41cB3 zSOWV4$4?^-g86k>(pZ)l@2$f^b$!1piN#Ed_rqXiG9%uTtd%2*H~C2k7TGi5r)ugY zMDFc%`OR2=L9kVB_aRQBYs~1hi88G0yLt1zMvYX3Ja_3>ii3QVn4Mz$s1fgxb{IAa z<7>$cO6l>Hot9C1N*_J;GuoW=EXMR=+qA=QCq?u%W#A9?XGe z3(c9JQ#DjtIgahqS@Q_)(JvxW3K;0h?*56qw=VBzovypa`N%iBBGnYDqmy;|iqoW@ ziAUcmyDBt>=H5-DLpwGhNH0gr1aCpA7GWw7fsGY$?2g{1nHd0vVT-1$p=`e_3Y4FeDW)$$GJp+y>RnbB zS8Xb-yfjCMRJn>TR66y5Q?;bL~CFYkZo&4*}^S>6B&s7F%56&+i7C3u{9O(3gET&H) zD0i8rQI3E!i1g|#?IIlGh~Byu?66f&s(a$!1Foq}6#)zVxD@QXcrtP-=r~fy)_x z^o>2|A#eeQh_hvh&eOV2$pvFo&bKhw>x%T(Mi`6J4gtbLZ@-_2^JAaq9Qq zSxP&G|Fruq`>*%QzbtqUPCYt8hB$RY$=-MeF&%05-|?T?&r55=7q$1}YwDKz&ly!= zU%NExMyNIUE%-pGh^U`<6rZP z&@Tcp%#Xp%IwnmKW$N~6Otxcr*Q(HjHGu{n=`pe1GW@hj&t;Pd+9SR{wMA?50+B}$?C5H$O?B0k@THuRcc%K;>}{2%Y;y4|8ipr zFY4s_9BRWDr@u3MknlsF#-~sf?1~-D%g*p+T(kF=bQR{LonwO#JIM7KWtJD<41v>?+`|vf%KtE zU4(}6b?xRyyK1v4+$A#B;2k~3GEcXM)paT7(|!=^0zwXp(mO z))!eQdM(qE@y-I-efeK4J67ss6}>JFzb+|bp#ErP+R34)6Tf-+w|`pWFW7ryABLQ( zqJw3zCz)=Ac&1zMAZAgkjd*c8iD`Ww3tIp15v;T^4>G>4op>rDU{eZE{Ed8DH>vD8 zc(}C`0rz8QG5Py$F%Zu+O57N`pzM~%t>GhZegBtZ{cxb<-*>K3Xm`8_^h-`_uPhFF zgNplfyAq^n3H}$tlomubFcr+>48IQL>rfGor-cZPq9@K&;Hb)qtU>b;-;IsHziPEm=j@Uvt)3g@&1g)Sr}lEvWGaTvyYl>K z(FLE2`nvO(!#t+vg0eidIXNB2-y-e-XJcs424Bq3`&CjN><*C~i%Hf!{KnGUQ8UgF zWwFyPd+sv@9O+5nWs0?Xu@?gHbAiF0zRf{qm+}uS*g`>-Ig2NY3K3g_Yp^a~_JZO_ zmdsS`SU=zA7J74k^PBtke+Fmz*l1I*+~*pVQHz_;0)*UJ7{xC(+OU~}Wyo&q!D{ED z>|)v+UCY1?Ihcq#M$5XP*-QP*Hoe`Qlg+l1iTh?gTH2uk!cxeb(mn*@DZJ!8Vnac2 zV*wU8QElR-03s+hkFImDWWvRPX6o_p+|__zMd9BEW*TMSJbe)~g$BH9Qe58s!00OV z1D6?)X;5YiGJChWRX&8gUDcf0!CX2lzPTNN^Fm?-S;Nj3VgkZ>0n>&0@25>wnbu(y zcZ^v~zG+hNLfvuw6^=Q@=KhhHYg;CL68;~8p6JEXUo&cRbq&_H)bfhOc??%{X_CeBgFb1EUShcBH8U=)iU<3Ndrv++Z18;> zRn%VljxSjJtd*V>=loglIX`ErKi^-sq_=zsW@tIr%A!o^`UT#5r|PvyO0Aj7gH2mq z1r0(ukBdXS4&TXoY+6))A@h8_*ykB|vYfi!p?~`;hyCjJWS+!%9d1R&#iPxA?cl0` zDPM54c=d_qQ=h16&iI)Lu4vJSjgove{fN>7?tEqmM=GFCw{AzV!-OC)Or@gkqSdOe zd;Ac!rM#`+ldHbIaxd{UgVLdc7s>``eQK~tr~jtk>E^#(Z|x}iR3V?^tJEL1Tq)Fn z#qVK5HtVqfq`vQ5`KGdR1qD`Z$=_2J)+$;d-+`(&2I2lD{iR^x74q?NpjNmAn~hm;I44s)XO1cIzK%8gkw~XOrw7#103N!}lRP%M+Z)7g66i&b z2d{@??xLQ-5r;2B#Xqj&xLAVgVns7f_`RSO!KysZ*ydcLWsd|+LH1r4o6@W@aUlm$ zQ;yrmMG(0bmtkmmi+=@0-@GF32;H!z`?V*Q7l+CMuEn3kpAqP_iH4Shsi+`lDY{$& zMKrWo-ehFEo{>aIPx(SjeFolfVG5^HX`0On`>MgUYHSW7SJ3K6tl+=?2-v?uU5@;x MQtbcm3HdYqAHe};CIA2c literal 0 HcmV?d00001 From 3351cefbb08d0c9cb068c792c6356b24051bc6b3 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 04:18:36 +0000 Subject: [PATCH 45/47] update doc --- docs/en_US/Compressor/CompressionUtils.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index 0233d74ab6..3d3ac8ef21 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -84,7 +84,7 @@ Complicated models may have residual connection/concat operations in their model ![](../../img/channel_dependency_example.jpg) -If the layers have channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then even the pruned model with mask works fine. but the pruned model cannot be speedup to the final model that runs on the devices, because there will be a shape conflict when the model tries to add/concat the outputs of these layers. This tool is to find the layers that have channel count dependencies to help users better prune their model. +If the layers have channel dependency are assigned with different sparsities (here we only discuss the structured pruning by L1FilterPruner/L2FilterPruner), then there will be a shape conflict during these layers. Even the pruned model with mask works fine, the pruned model cannot be speedup to the final model directly that runs on the devices, because there will be a shape conflict when the model tries to add/concat the outputs of these layers. This tool is to find the layers that have channel count dependencies to help users better prune their model. #### Usage ```python From db0ff63c2b6769c7ec4b2ef16e9f50da65045f01 Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 05:22:46 +0000 Subject: [PATCH 46/47] update Docs. Signed-off-by: Ningxin --- docs/en_US/Compressor/CompressionUtils.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en_US/Compressor/CompressionUtils.md b/docs/en_US/Compressor/CompressionUtils.md index 3d3ac8ef21..09418912b9 100644 --- a/docs/en_US/Compressor/CompressionUtils.md +++ b/docs/en_US/Compressor/CompressionUtils.md @@ -113,7 +113,7 @@ Set 12,layer4.1.conv1 ``` ### MaskConflict -When the masks of different layers in a model have conflict, we can fix the mask conflict by MaskConflict. Specifically, the MaskConflict loads the masks exported by the pruners(L1FilterPruner, etc), and check if there is mask conflict, if so, MaskConflict sets the conflicting masks to the same value. +When the masks of different layers in a model have conflict (for example, assigning different sparsities for the layers that have channel dependency), we can fix the mask conflict by MaskConflict. Specifically, the MaskConflict loads the masks exported by the pruners(L1FilterPruner, etc), and check if there is mask conflict, if so, MaskConflict sets the conflicting masks to the same value. ``` from nni.compression.torch.utils.mask_conflict import MaskConflict From 7153bd70155611d01ddef551337a4c7dccbd087a Mon Sep 17 00:00:00 2001 From: Ningxin Date: Tue, 16 Jun 2020 05:41:18 +0000 Subject: [PATCH 47/47] remove unnecessray comments --- src/sdk/pynni/tests/test_compression_utils.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/src/sdk/pynni/tests/test_compression_utils.py b/src/sdk/pynni/tests/test_compression_utils.py index 7f6d56657d..803666a50c 100644 --- a/src/sdk/pynni/tests/test_compression_utils.py +++ b/src/sdk/pynni/tests/test_compression_utils.py @@ -76,21 +76,6 @@ def test_channel_dependency(self): assert d_set_count == len(channel_dependency_ground_truth[name]) fpath = os.path.join(outdir, name) channel_depen.export(fpath) - # comments the visulization test temporarily - # because, this test needs the graphviz package - # in ths os.(apt install graphviz) - # def test_visulization(self): - # outdir = os.path.join(prefix, 'visual') - # os.makedirs(outdir, exist_ok=True) - # for name in model_names: - # print('Visualization for %s' % name) - # model = getattr(models, name) - # net = model().to(device) - # dummy_input = torch.ones(1, 3, 224, 224).to(device) - # vg = VisualGraph(net, dummy_input) - # picpath = os.path.join(outdir, name) - # depen_file = os.path.join('analysis_test/dependency', name) - # vg.visualization(picpath, dependency_file=depen_file) def get_pruned_index(self, mask): pruned_indexes = []