This repository has been archived by the owner on Dec 1, 2021. It is now read-only.

tf_upgrade_v2 for cmd #875

Merged: 3 commits, Mar 3, 2020
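
This PR applies TensorFlow's tf_upgrade_v2 rewrite to the blueoil/cmd scripts: TF1 symbols that were removed from the top-level namespace in TF2 (tf.Session, tf.ConfigProto, tf.global_variables_initializer, ...) are renamed to their tf.compat.v1 equivalents, leaving behavior unchanged. The upgrade script ships with TensorFlow 2.x and is typically invoked along these lines (paths illustrative, not the exact command used here):

    tf_upgrade_v2 --intree blueoil/cmd --outtree blueoil/cmd --reportfile report.txt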
6 changes: 3 additions & 3 deletions blueoil/cmd/convert_weight_from_darknet.py
@@ -53,14 +53,14 @@ def convert(config, weight_file):
 
     model.inference(images_placeholder, is_training)
 
-    init_op = tf.global_variables_initializer()
+    init_op = tf.compat.v1.global_variables_initializer()
 
     saver = tf.compat.v1.train.Saver(max_to_keep=None)
 
-    variables = tf.global_variables()
+    variables = tf.compat.v1.global_variables()
 
     session_config = None
-    sess = tf.Session(graph=graph, config=session_config)
+    sess = tf.compat.v1.Session(graph=graph, config=session_config)
     sess.run([init_op, ])
     suffixes = ['bias', 'beta', 'gamma', 'moving_mean', 'moving_variance', 'kernel']
     convert_variables = []
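For reference, all of these cmd scripts share the same TF1 graph-mode skeleton, which keeps working under TF2 through the compat.v1 shims. A minimal self-contained sketch of that pattern (illustrative names, not code from this repo):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
        # ops and variables must be created inside the graph context
        x = tf.compat.v1.placeholder(tf.float32, shape=[None, 32], name="x")
        w = tf.Variable(tf.zeros([32, 10]), name="w")
        y = tf.matmul(x, w, name="output")
        init_op = tf.compat.v1.global_variables_initializer()

    sess = tf.compat.v1.Session(graph=graph)
    sess.run(init_op)
    out = sess.run(y, feed_dict={x: [[0.0] * 32]})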
8 changes: 4 additions & 4 deletions blueoil/cmd/evaluate.py
@@ -106,15 +106,15 @@ def evaluate(config, restore_path, output_dir):
 
     metrics_summary_op, metrics_placeholders = executor.prepare_metrics(metrics_ops_dict)
 
-    init_op = tf.global_variables_initializer()
-    reset_metrics_op = tf.local_variables_initializer()
+    init_op = tf.compat.v1.global_variables_initializer()
+    reset_metrics_op = tf.compat.v1.local_variables_initializer()
     saver = tf.compat.v1.train.Saver(max_to_keep=None)
 
     session_config = None  # tf.ConfigProto(log_device_placement=True)
-    sess = tf.Session(graph=graph, config=session_config)
+    sess = tf.compat.v1.Session(graph=graph, config=session_config)
     sess.run([init_op, reset_metrics_op])
 
-    validation_writer = tf.summary.FileWriter(environment.TENSORBOARD_DIR + "/evaluate")
+    validation_writer = tf.compat.v1.summary.FileWriter(environment.TENSORBOARD_DIR + "/evaluate")
 
     saver.restore(sess, restore_path)
 
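One detail worth keeping in mind for evaluate.py: the compat.v1 streaming metrics store their running totals in the LOCAL_VARIABLES collection, which is why reset_metrics_op (local_variables_initializer) clears metric state between evaluation runs. A toy sketch of that mechanism (not code from this repo):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
        labels = tf.constant([1, 0, 1])
        preds = tf.constant([1, 1, 1])
        # accuracy keeps its (total, count) state as local variables
        acc, acc_update = tf.compat.v1.metrics.accuracy(labels, preds)
        reset_metrics_op = tf.compat.v1.local_variables_initializer()

    sess = tf.compat.v1.Session(graph=graph)
    sess.run(reset_metrics_op)
    sess.run(acc_update)
    print(sess.run(acc))        # ~0.667: two of three predictions match
    sess.run(reset_metrics_op)  # zeroes the totals for the next run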
6 changes: 3 additions & 3 deletions blueoil/cmd/export.py
@@ -95,12 +95,12 @@ def _export(config, restore_path, image_path):
 
     images_placeholder, _ = model.placeholders()
     model.inference(images_placeholder, is_training)
-    init_op = tf.global_variables_initializer()
+    init_op = tf.compat.v1.global_variables_initializer()
 
     saver = tf.compat.v1.train.Saver(max_to_keep=50)
 
-    session_config = tf.ConfigProto()
-    sess = tf.Session(graph=graph, config=session_config)
+    session_config = tf.compat.v1.ConfigProto()
+    sess = tf.compat.v1.Session(graph=graph, config=session_config)
     sess.run(init_op)
 
     saver.restore(sess, restore_path)
4 changes: 2 additions & 2 deletions blueoil/cmd/measure_latency.py
@@ -55,13 +55,13 @@ def _measure_time(config, restore_path, step_size):
     images_placeholder, labels_placeholder = model.placeholders()
     output = model.inference(images_placeholder, is_training)
 
-    init_op = tf.global_variables_initializer()
+    init_op = tf.compat.v1.global_variables_initializer()
 
     saver = tf.compat.v1.train.Saver()
 
     session_config = None  # tf.ConfigProto(log_device_placement=True)
     # session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
-    sess = tf.Session(graph=graph, config=session_config)
+    sess = tf.compat.v1.Session(graph=graph, config=session_config)
     sess.run(init_op)
 
     if restore_path:
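A nit the automated rewrite cannot catch: tf_upgrade_v2 rewrites code, not comments, so the commented-out XLA line above still names the old symbol. If it is ever re-enabled it would presumably need the same prefix:

    # session_config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1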
6 changes: 3 additions & 3 deletions blueoil/cmd/predict.py
@@ -77,12 +77,12 @@ def _run(input_dir, output_dir, config, restore_path, save_images):
     images_placeholder, _ = model.placeholders()
     output_op = model.inference(images_placeholder, is_training)
 
-    init_op = tf.global_variables_initializer()
+    init_op = tf.compat.v1.global_variables_initializer()
 
     saver = tf.compat.v1.train.Saver(max_to_keep=None)
 
-    session_config = tf.ConfigProto()
-    sess = tf.Session(graph=graph, config=session_config)
+    session_config = tf.compat.v1.ConfigProto()
+    sess = tf.compat.v1.Session(graph=graph, config=session_config)
     sess.run(init_op)
     saver.restore(sess, restore_path)
 
12 changes: 7 additions & 5 deletions blueoil/cmd/profile_model.py
@@ -54,11 +54,11 @@ def _profile(config, restore_path, bit, unquant_layers):
     images_placeholder, _ = model.placeholders()
     model.inference(images_placeholder, is_training)
 
-    init_op = tf.global_variables_initializer()
+    init_op = tf.compat.v1.global_variables_initializer()
     saver = tf.compat.v1.train.Saver(max_to_keep=50)
 
-    session_config = tf.ConfigProto()
-    sess = tf.Session(graph=graph, config=session_config)
+    session_config = tf.compat.v1.ConfigProto()
+    sess = tf.compat.v1.Session(graph=graph, config=session_config)
     sess.run(init_op)
 
     if restore_path:
@@ -145,7 +145,8 @@ def _save_json(name, image_size, num_classes, node_param_dict, node_flops_dict):
 
 
 def _profile_flops(graph, res, scopes):
-    float_prof = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
+    float_prof = tf.compat.v1.profiler.profile(
+        graph, options=tf.compat.v1.profiler.ProfileOptionBuilder.float_operation())
     float_res_dict = collections.defaultdict(int)
     float_res_dict["total"] = float_prof.total_float_ops
     for node in float_prof.children:
@@ -172,7 +173,8 @@ def _profile_flops(graph, res, scopes):
 
 
 def _profile_params(graph, res, bit, unquant_layers):
-    prof = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
+    prof = tf.compat.v1.profiler.profile(
+        graph, options=tf.compat.v1.profiler.ProfileOptionBuilder.trainable_variables_parameter())
 
     # helper func to make profile res
     def helper(node, level):
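For context: tf.compat.v1.profiler.profile walks a graph and aggregates per-node statistics, with ProfileOptionBuilder selecting what gets counted — float_operation() for FLOPs, trainable_variables_parameter() for parameter counts. A minimal sketch (assumes `graph` is a tf.Graph that already contains a model):

    flops = tf.compat.v1.profiler.profile(
        graph, options=tf.compat.v1.profiler.ProfileOptionBuilder.float_operation())
    params = tf.compat.v1.profiler.profile(
        graph, options=tf.compat.v1.profiler.ProfileOptionBuilder.trainable_variables_parameter())
    print(flops.total_float_ops, params.total_parameters)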
25 changes: 13 additions & 12 deletions blueoil/cmd/train.py
@@ -129,8 +129,8 @@ def start_training(config):
 
     metrics_summary_op, metrics_placeholders = executor.prepare_metrics(metrics_ops_dict)
 
-    init_op = tf.global_variables_initializer()
-    reset_metrics_op = tf.local_variables_initializer()
+    init_op = tf.compat.v1.global_variables_initializer()
+    reset_metrics_op = tf.compat.v1.local_variables_initializer()
     if use_horovod:
         # add Horovod broadcasting variables from rank 0 to all
         bcast_global_variables_op = hvd.broadcast_global_variables(0)
@@ -141,7 +141,7 @@
     saver = tf.compat.v1.train.Saver(max_to_keep=config.KEEP_CHECKPOINT_MAX)
 
     if config.IS_PRETRAIN:
-        all_vars = tf.global_variables()
+        all_vars = tf.compat.v1.global_variables()
         pretrain_var_list = [
             var for var in all_vars if var.name.startswith(tuple(config.PRETRAIN_VARS))
         ]
@@ -152,8 +152,8 @@
 
     if use_horovod:
         # For distributed training
-        session_config = tf.ConfigProto(
-            gpu_options=tf.GPUOptions(
+        session_config = tf.compat.v1.ConfigProto(
+            gpu_options=tf.compat.v1.GPUOptions(
                 allow_growth=True,
                 visible_device_list=str(hvd.local_rank())
             )
@@ -166,23 +166,24 @@
         #         per_process_gpu_memory_fraction=0.1
         #     )
         # )
-        session_config = tf.ConfigProto()  # tf.ConfigProto(log_device_placement=True)
+        session_config = tf.compat.v1.ConfigProto()  # tf.ConfigProto(log_device_placement=True)
         # TODO(wakisaka): XLA JIT
         # session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
 
-    sess = tf.Session(graph=graph, config=session_config)
+    sess = tf.compat.v1.Session(graph=graph, config=session_config)
     sess.run([init_op, reset_metrics_op])
 
     if rank == 0:
-        train_writer = tf.summary.FileWriter(environment.TENSORBOARD_DIR + "/train", sess.graph)
+        train_writer = tf.compat.v1.summary.FileWriter(environment.TENSORBOARD_DIR + "/train", sess.graph)
         if use_train_validation_saving:
-            train_val_saving_writer = tf.summary.FileWriter(environment.TENSORBOARD_DIR + "/train_validation_saving")
-            val_writer = tf.summary.FileWriter(environment.TENSORBOARD_DIR + "/validation")
+            train_val_saving_writer = tf.compat.v1.summary.FileWriter(
+                environment.TENSORBOARD_DIR + "/train_validation_saving")
+            val_writer = tf.compat.v1.summary.FileWriter(environment.TENSORBOARD_DIR + "/validation")
 
     if config.IS_PRETRAIN:
         print("------- Load pretrain data ----------")
         pretrain_saver.restore(sess, os.path.join(config.PRETRAIN_DIR, config.PRETRAIN_FILE))
-        sess.run(tf.assign(global_step, 0))
+        sess.run(tf.compat.v1.assign(global_step, 0))
 
     last_step = 0
 
@@ -301,7 +302,7 @@ def start_training(config):
 
         if step == 0:
            # check create pb on only first step.
-            minimal_graph = tf.graph_util.convert_variables_to_constants(
+            minimal_graph = tf.compat.v1.graph_util.convert_variables_to_constants(
                 sess,
                 sess.graph.as_graph_def(add_shapes=True),
                 ["output"],
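The freeze step at the end of train.py keeps working under the shim: convert_variables_to_constants folds the session's current variable values into the graph as Const nodes, so the result can be serialized without a checkpoint. A sketch of what can be done with minimal_graph afterwards (file name illustrative; assumes `sess` and the "output" op from the diff above):

    minimal_graph = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(add_shapes=True),
        ["output"],
    )
    with open("minimal_graph.pb", "wb") as f:
        f.write(minimal_graph.SerializeToString())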
14 changes: 7 additions & 7 deletions blueoil/cmd/tune_ray.py
@@ -116,14 +116,14 @@ def update_parameters_for_each_trial(network_kwargs, chosen_kwargs):
             network_kwargs['optimizer_kwargs'][key] = chosen_kwargs['optimizer_class'][key]
     network_kwargs['learning_rate_func'] = chosen_kwargs['learning_rate_func']['scheduler']
     base_lr = chosen_kwargs['learning_rate']
-    if network_kwargs['learning_rate_func'] is tf.train.piecewise_constant:
+    if network_kwargs['learning_rate_func'] is tf.compat.v1.train.piecewise_constant:
         lr_factor = chosen_kwargs['learning_rate_func']['scheduler_factor']
         network_kwargs['learning_rate_kwargs']['values'] = [base_lr,
                                                             base_lr * lr_factor,
                                                             base_lr * lr_factor * lr_factor,
                                                             base_lr * lr_factor * lr_factor * lr_factor]
         network_kwargs['learning_rate_kwargs']['boundaries'] = chosen_kwargs['learning_rate_func']['scheduler_steps']
-    elif network_kwargs['learning_rate_func'] is tf.train.polynomial_decay:
+    elif network_kwargs['learning_rate_func'] is tf.compat.v1.train.polynomial_decay:
         network_kwargs['learning_rate_kwargs']['learning_rate'] = base_lr
         network_kwargs['learning_rate_kwargs']['power'] = chosen_kwargs['learning_rate_func']['scheduler_power']
         network_kwargs['learning_rate_kwargs']['decay_steps'] = chosen_kwargs['learning_rate_func']['scheduler_decay']
@@ -210,12 +210,12 @@ def _setup(self, config):
         self.metrics_ops_dict = metrics_ops_dict
         self.metrics_update_op = metrics_update_op
 
-        init_op = tf.global_variables_initializer()
-        self.reset_metrics_op = tf.local_variables_initializer()
+        init_op = tf.compat.v1.global_variables_initializer()
+        self.reset_metrics_op = tf.compat.v1.local_variables_initializer()
 
-        session_config = tf.ConfigProto(
-            gpu_options=tf.GPUOptions(allow_growth=True))
-        self.sess = tf.Session(config=session_config)
+        session_config = tf.compat.v1.ConfigProto(
+            gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))
+        self.sess = tf.compat.v1.Session(config=session_config)
         self.sess.run([init_op, self.reset_metrics_op])
         self.iterations = 0
         self.saver = tf.compat.v1.train.Saver()
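Note that the `is` comparisons in update_parameters_for_each_trial check function identity, so they only match if the tuning config registers the same tf.compat.v1.train function objects under 'scheduler'; the rename must be mirrored wherever that config is built. For reference, a toy use of the piecewise schedule being configured there (illustrative values; assumes v1 graph mode):

    global_step = tf.compat.v1.train.get_or_create_global_step()
    lr = tf.compat.v1.train.piecewise_constant(
        global_step,
        boundaries=[1000, 2000, 3000],    # steps at which the rate drops
        values=[1e-3, 1e-4, 1e-5, 1e-6],  # len(values) == len(boundaries) + 1
    )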