Skip to content
This repository has been archived by the owner on Dec 1, 2021. It is now read-only.

Fix a stability issue with multiple measurements #1001

Merged
merged 10 commits into from
Apr 30, 2020
48 changes: 28 additions & 20 deletions output_template/python/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,9 @@ def _pre_process(raw_image, pre_process, data_format):
image = pre_process(image=raw_image)['image']
if data_format == 'NCHW':
image = np.transpose(image, [2, 0, 1])

# add the batch dimension
image = np.expand_dims(image, axis=0)
return image


Expand Down Expand Up @@ -98,7 +101,7 @@ def _run(nn, image_data):
return nn.run(image_data)


def _timerfunc(func, extraArgs, trial):
def _timerfunc(func, extraArgs, trial=1):
if sys.version_info.major == 2:
get_time = time.time
else:
Expand All @@ -112,7 +115,6 @@ def _timerfunc(func, extraArgs, trial):
runtime += end - start
msg = "Function {func} took {time} seconds to complete"
logger.info(msg.format(func=func.__name__, time=end - start))
logger.info("Avg(func {}): {} sec.".format(func.__name__, runtime / trial))
return value, runtime / trial


Expand All @@ -133,34 +135,40 @@ def run_prediction(input_image, model, config_file, trial=1):
pre_process = build_pre_process(config.PRE_PROCESSOR)
post_process = build_post_process(config.POST_PROCESSOR)

# pre process for image
image_data, bench_pre = _timerfunc(_pre_process, (image_data, pre_process, config.DATA_FORMAT), trial)
results_total = []
results_pre = []
results_run = []
results_post = []

# add the batch dimension
image_data = np.expand_dims(image_data, axis=0)
for _ in range(trial):
# pre process for image
output, bench_pre = _timerfunc(_pre_process, (image_data, pre_process, config.DATA_FORMAT))

# run the model to inference
output, bench_inference = _timerfunc(_run, (nn, image_data), trial)
# run the model to inference
output, bench_run = _timerfunc(_run, (nn, output))

logger.info('Output: (before post process)\n{}'.format(output))
        # post process for output
output, bench_post = _timerfunc(_post_process, (output, post_process))

    # post process for output
output, bench_post = _timerfunc(_post_process, (output, post_process), trial)
results_total.append(bench_pre + bench_run + bench_post)
results_pre.append(bench_pre)
results_run.append(bench_run)
results_post.append(bench_post)

logger.info('Output: (after post process)\n{}'.format(output))
time_stat = {
"total": {"mean": np.mean(results_total), "std": np.std(results_total)},
"pre": {"mean": np.mean(results_pre), "std": np.std(results_pre)},
"post": {"mean": np.mean(results_post), "std": np.std(results_post)},
"run": {"mean": np.mean(results_run), "std": np.std(results_run)},
}

# json output
json_output = JsonOutput(
task=Tasks(config.TASK),
classes=config.CLASSES,
image_size=config.IMAGE_SIZE,
data_format=config.DATA_FORMAT,
bench={
"total": bench_pre + bench_post + bench_inference,
"pre": bench_pre,
"post": bench_post,
"inference": bench_inference,
},
bench=time_stat,
)

image_from_json = ImageFromJson(
Expand All @@ -177,8 +185,8 @@ def run_prediction(input_image, model, config_file, trial=1):
_save_json(output_dir, json_obj)
filename_images = image_from_json(json_obj, raw_images, image_files)
_save_images(output_dir, filename_images)
logger.info("Benchmark avg result(sec) for {} trials: pre_process: {} inference: {} post_process: {} Total: {}"
.format(trial, bench_pre, bench_inference, bench_post, bench_pre + bench_post + bench_inference,))
logger.info("Benchmark avg result(sec) for {} trials".format(trial))
logger.info(time_stat)


@click.command(context_settings=dict(help_option_names=['-h', '--help']))
Expand Down