This repository has been archived by the owner on Dec 1, 2021. It is now read-only.

Enhance profile_model.py to output JSON file for benchmark #691

Merged
merged 10 commits · Dec 24, 2019
81 changes: 67 additions & 14 deletions lmnet/executor/profile_model.py
@@ -14,6 +14,8 @@
# limitations under the License.
# =============================================================================
import collections
import json
import logging
import os

import click
@@ -23,6 +25,9 @@
from lmnet.utils import config as config_util
from lmnet.utils import executor

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def _profile(config, restore_path, bit, unquant_layers):
output_root_dir = os.path.join(environment.EXPERIMENT_DIR, "export")
@@ -57,7 +62,7 @@ def _profile(config, restore_path, bit, unquant_layers):
sess.run(init_op)

if restore_path:
print("Restore from {}".format(restore_path))
logger.info("Restore from {}".format(restore_path))
saver.restore(sess, restore_path)

main_output_dir = os.path.join(output_root_dir, "{}x{}".format(config.IMAGE_SIZE[0], config.IMAGE_SIZE[1]))
@@ -70,7 +75,7 @@ def _profile(config, restore_path, bit, unquant_layers):
with inference_graph.as_default():
tf.import_graph_def(inference_graph_def)

scopes = {"_TFProfRoot": 0}
scopes = {"total": 0}
scope_idx = 1
for node in inference_graph_def.node:
names = node.name.split("/")
@@ -81,13 +86,14 @@ def _profile(config, restore_path, bit, unquant_layers):

# [level, node name, total param, 32 bits size, quantized size, flops]
res = []
res = _profile_params(graph, res, bit, unquant_layers)
res = _profile_flops(inference_graph, res, scopes)
res, node_param_dict = _profile_params(graph, res, bit, unquant_layers)
res, node_flops_dict = _profile_flops(inference_graph, res, scopes)

name = ModelClass.__name__
image_size = config.IMAGE_SIZE
num_classes = len(config.CLASSES)
_render(name, image_size, num_classes, bit, res)
_save_json(name, image_size, num_classes, node_param_dict, node_flops_dict)


def _render(name, image_size, num_classes, bit, res):
@@ -114,13 +120,30 @@ def _render(name, image_size, num_classes, bit, res):
output_file = os.path.join(environment.EXPERIMENT_DIR, "{}_profile.md".format(name))
with open(output_file, "w") as f:
f.write(file_data)
print("Model's profile has been saved into {}".format(output_file))
logger.info("Model's profile has been saved into {}".format(output_file))


def _save_json(name, image_size, num_classes, node_param_dict, node_flops_dict):
prof_dict = {
'model_name': name,
'image_size_height': image_size[0],
'image_size_width': image_size[1],
'num_classes': num_classes,
'flops': node_flops_dict,
'parameters': node_param_dict,
}

output_file = os.path.join(environment.EXPERIMENT_DIR, "{}_profile.json".format(name))
with open(output_file, "w") as f:
f.write(json.dumps(prof_dict, indent=4))

logger.info("save json: {}".format(output_file))


def _profile_flops(graph, res, scopes):
float_prof = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
float_res_dict = collections.defaultdict(int)
float_res_dict["_TFProfRoot"] = float_prof.total_float_ops
float_res_dict["total"] = float_prof.total_float_ops
for node in float_prof.children:
scope = node.name.split("/")[1]
float_res_dict[scope] += node.total_float_ops
@@ -136,7 +159,12 @@ def _profile_flops(graph, res, scopes):
elif scope in float_res_dict:
new_res.append([1, scope, "-", "-", "-", flops])

return new_res
node_flops_dict = {
'total_flops': float_res_dict["total"],
'children': [{"name": k, "flops": float_res_dict[k]} for k in float_res_dict.keys() if k != "total"]
}

return new_res, node_flops_dict

Contributor: `[{"name": k, "flops": v} for k, v in float_res_dict.items()]`?

Contributor Author: Changed 👍
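As a runnable illustration of the reviewer's suggestion, here is a minimal sketch with a made-up dict:

float_res_dict = {"total": 300, "conv1": 100, "conv2": 200}
# Iterating items() avoids the keys() plus per-key lookup pattern used above.
children = [{"name": k, "flops": v} for k, v in float_res_dict.items() if k != "total"]
print(children)  # [{'name': 'conv1', 'flops': 100}, {'name': 'conv2', 'flops': 200}]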


def _profile_params(graph, res, bit, unquant_layers):
@@ -147,21 +175,46 @@ def helper(node, level):
is_quant_kernel = all([layer not in node.name for layer in unquant_layers]) and "kernel" == \
node.name.split("/")[-1]
bits = bit if is_quant_kernel else 32
node_name = "total" if level == 0 else node.name
res.append(
[level, node.name, node.total_parameters, node.total_parameters * 32, node.total_parameters * bits])
[level, node_name, node.total_parameters, node.total_parameters * 32, node.total_parameters * bits])
idx = len(res) - 1

node_param_dict = {
'name': node_name,
'parameters': node.total_parameters,
'size': node.total_parameters * 32,
'quant_size': node.total_parameters * bits,
'children': [],
}

Member: Just a question. A Conv layer can be quantized, so its parameter size is bits. Other layer types (for example, fully connected) will not be quantized. Does this size calculation assume that all of our parameters are Conv?

Contributor Author: @iizukak Thank you for your question! No. If a layer is not a quantizable kernel, quant_size will be the same value as size. quant_size is determined in the following steps:

  1. check whether the node name is a kernel or not; see is_quant_kernel
  2. set bits = bit if is_quant_kernel else 32
  3. calculate 'quant_size': node.total_parameters * bits

This is the previous implementation, but it is difficult to understand, as you mentioned. Hmm, OK. I am planning to change this logic as follows:

node_params = node.total_parameters
node_size = node_params * 32
node_quant_size = (node_params * bit) if is_quant_kernel else None

The plan is to change quant_size to None if it cannot be quantized. What do you think?

Contributor Author:

> The plan is to change quant_size to None if it cannot be quantized.

I tried implementing that, but it is a little complicated to mix None and int values under the same dictionary key, and it changes the current Markdown file format. So I would rather not use None, in order to follow the current format. node_quant_size will then be (node_params * bit) if is_quant_kernel else node_size.

Contributor Author: @iizukak I changed to use named values for the node parameters, so this should be easier to understand now, I think. Please re-review! 👍
if node_name == "total":
node_param_dict = {
'total_parameters': node.total_parameters,
'total_size': node.total_parameters * 32,
'quant_bit': bit,
'total_quant_size': None,
'children': [],
}

sumsq = 0
for c in node.children:
size_quat = helper(c, level + 1)
size_quat, children = helper(c, level + 1)
sumsq += size_quat
node_param_dict["children"].append(children)
res[idx][-1] = sumsq or res[idx][-1]
return res[idx][-1]

helper(prof, 0)
if node_name == "total":
node_param_dict["total_quant_size"] = res[idx][-1]
else:
node_param_dict["quant_size"] = res[idx][-1]
if len(node.children) == 0:
node_param_dict["is_quant_kernel"] = is_quant_kernel
return res[idx][-1], node_param_dict

Member: res[idx][-1] appears several times. It would be better to bind it to a named variable.

Contributor Author: @iizukak Thanks! Fixed 👍

Contributor: `if not node.children:`

Contributor Author: Changed 👍

_, node_param_dict = helper(prof, 0)
for elem in res:
elem[3] = round(elem[3] / 8 / 1024 ** 2, 5)
elem[4] = round(elem[4] / 8 / 1024 ** 2, 5)
return res
return res, node_param_dict
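To make the size arithmetic concrete, here is a small worked example under assumed numbers (a 1,000-parameter kernel profiled at bit=2; neither figure comes from this PR):

node_params = 1000
node_size = node_params * 32  # 32000 bits at float32
node_quant_size = node_params * 2  # 2000 bits when is_quant_kernel
# The final loop above converts bit counts to MiB with the same rounding:
print(round(node_size / 8 / 1024 ** 2, 5))  # 0.00381
print(round(node_quant_size / 8 / 1024 ** 2, 5))  # 0.00024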


def run(experiment_id, restore_path, config_file, bit, unquant_layers):
@@ -217,7 +270,7 @@ def run(experiment_id, restore_path, config_file, bit, unquant_layers):
@click.option(
"-b",
"--bit",
default=32,
default=1,
help="quantized bit",
)
@click.option(