
Commit

Reduce memory allocation for models to amount needed
SippieCup committed Apr 20, 2020
1 parent 29f4380 · commit d570db5
Showing 1 changed file with 2 additions and 2 deletions.
selfdrive/modeld/runners/keras_runner.py (4 changes: 2 additions & 2 deletions)
@@ -41,9 +41,9 @@ def run_loop(m):
   gpus = tf.config.experimental.list_physical_devices('GPU')
   if len(gpus) > 0:
     if os.path.splitext(os.path.basename(sys.argv[1]))[0]== "supercombo":
-      tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2548)])
+      tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1772)])
     else:
-      tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=256)])
+      tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=306)])
   with open(f"{os.path.splitext(sys.argv[1])[0]}.model.keras", "r") as json_file:
     m = model_from_json(json_file.read())
   m.load_weights(f"{os.path.splitext(sys.argv[1])[0]}.weights.keras")
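For context, the change relies on TensorFlow 2.x virtual device configuration, which caps how much memory this process may allocate on a physical GPU (memory_limit is given in MiB). Below is a minimal standalone sketch of the same pattern; the 1772 MiB value mirrors the new supercombo limit from the diff, while the rest of the snippet is illustrative and not part of the commit.

#!/usr/bin/env python3
# Minimal sketch of the GPU memory-capping pattern used in the diff above.
# Assumes TensorFlow 2.x; memory_limit is in MiB. The 1772 value mirrors
# the new supercombo limit; this is an illustration, not the real runner.
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
  # Create a single virtual device on the first GPU with a hard memory cap,
  # so TensorFlow does not grab nearly all GPU memory at startup.
  tf.config.experimental.set_virtual_device_configuration(
      gpus[0],
      [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1772)])

# Any model loaded after this point allocates within the 1772 MiB cap.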
