This repository has been archived by the owner on Jan 15, 2024. It is now read-only.

Commit

Update benchmark_utils.py
sxjscience committed Aug 10, 2020
1 parent 15272f1 commit 1cf5c7b
Showing 1 changed file with 3 additions and 3 deletions.
scripts/benchmarks/benchmark_utils.py: 6 changes (3 additions, 3 deletions)
@@ -719,7 +719,7 @@ def run_forward():
# cpu
memory_bytes = measure_peak_memory_cpu(run_forward)
memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
-return np.mean(runtimes), memory
+return np.min(runtimes) / 3.0, memory

def _train_speed_memory(self, model_name: str, batch_size: int, sequence_length: int)\
-> Tuple[float, Memory]:
@@ -794,7 +794,7 @@ def train_step():
else:
    raise NotImplementedError
timeit.repeat(train_step, repeat=1, number=3)
-runtimes = timeit.repeat(train_step, repeat=self._repeat, number=10)
+runtimes = timeit.repeat(train_step, repeat=self._repeat, number=3)

# Profile memory
if self._use_gpu:
@@ -810,7 +810,7 @@ def train_step():
# cpu
memory_bytes = measure_peak_memory_cpu(train_step)
memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
-return np.mean(runtimes), memory
+return np.min(runtimes) / 3.0, memory

def inference_speed_memory(self, *args, **kwargs) -> float:
return separate_process_wrapper_fn(self._inference_speed_memory, False)(*args, **kwargs)
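
The three changed lines adjust how the per-call time is reported: `timeit.repeat(..., repeat=self._repeat, number=3)` returns one total per repeat, each covering 3 calls, so the updated code takes the fastest repeat and divides it by 3 instead of averaging the repeat totals, and the training path reduces the timed calls per repeat from 10 to 3 so the divisor matches. A minimal runnable sketch of that measurement pattern, with a hypothetical `dummy_step` standing in for the benchmarked `run_forward()`/`train_step()` closure and `repeat=5` standing in for `self._repeat`:

```python
import timeit

import numpy as np


def dummy_step():
    # Hypothetical stand-in for the benchmarked forward/train step.
    sum(i * i for i in range(10_000))


# Warm-up, as in the script: one repeat of 3 calls whose timing is discarded.
timeit.repeat(dummy_step, repeat=1, number=3)

# Each element of `runtimes` is the total wall time of number=3 calls.
runtimes = timeit.repeat(dummy_step, repeat=5, number=3)

# Per-call time: fastest repeat divided by the 3 calls it covers,
# matching the updated `return np.min(runtimes) / 3.0, memory`.
per_call = np.min(runtimes) / 3.0
print(f"best per-call time: {per_call:.6f} s")
```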

0 comments on commit 1cf5c7b
