From abe7679d8b4c620df96d50843466a7ec90a0b972 Mon Sep 17 00:00:00 2001 From: Kentaro Iizuka Date: Mon, 3 Feb 2020 16:18:52 +0900 Subject: [PATCH 1/3] Remove configs using convert_weight_from_darknet --- .../object_detection/yolo_v2_openimagesv4.py | 242 ----------------- .../yolo_v2_pascalvoc_2007_2012.py | 244 ------------------ 2 files changed, 486 deletions(-) delete mode 100644 lmnet/configs/core/object_detection/yolo_v2_openimagesv4.py delete mode 100644 lmnet/configs/core/object_detection/yolo_v2_pascalvoc_2007_2012.py diff --git a/lmnet/configs/core/object_detection/yolo_v2_openimagesv4.py b/lmnet/configs/core/object_detection/yolo_v2_openimagesv4.py deleted file mode 100644 index 5b8e049bd..000000000 --- a/lmnet/configs/core/object_detection/yolo_v2_openimagesv4.py +++ /dev/null @@ -1,242 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 The Blueoil Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================= -from easydict import EasyDict -import tensorflow as tf - -from lmnet.common import Tasks -from lmnet.networks.object_detection.yolo_v2 import YoloV2 -from lmnet.datasets.open_images_v4 import OpenImagesV4BoundingBox -from lmnet.data_processor import Sequence -from lmnet.pre_processor import ( - ResizeWithGtBoxes, - DivideBy255, -) -from lmnet.post_processor import ( - FormatYoloV2, - ExcludeLowScoreBox, - NMS, -) -from lmnet.data_augmentor import ( - Brightness, - Color, - Contrast, - FlipLeftRight, - Hue, - SSDRandomCrop, -) - -IS_DEBUG = False - - -NETWORK_CLASS = YoloV2 -DATASET_CLASS = OpenImagesV4BoundingBox - -IMAGE_SIZE = [416, 416] -BATCH_SIZE = 8 -DATA_FORMAT = "NHWC" -TASK = Tasks.OBJECT_DETECTION -CLASSES = DATASET_CLASS(subset="train", batch_size=1).classes -MAX_STEPS = 1000000 -SAVE_CHECKPOINT_STEPS = 50000 -KEEP_CHECKPOINT_MAX = 5 -TEST_STEPS = 10000 -SUMMARISE_STEPS = 1000 - -PRE_PROCESSOR = Sequence([ - ResizeWithGtBoxes(size=IMAGE_SIZE), - DivideBy255(), -]) -anchors = [ - (1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071) -] -score_threshold = 0.05 -nms_iou_threshold = 0.5 -nms_max_output_size = 100 -POST_PROCESSOR = Sequence([ - FormatYoloV2( - image_size=IMAGE_SIZE, - classes=CLASSES, - anchors=anchors, - data_format=DATA_FORMAT, - ), - ExcludeLowScoreBox(threshold=score_threshold), - NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,), -]) - -# for debug -# IS_DEBUG = True -# SUMMARISE_STEPS = 1 - - -# pretrain -IS_PRETRAIN = False -PRETRAIN_VARS = [ - 'block_1/conv/kernel:0', - 'block_1/bn/beta:0', - 'block_1/bn/gamma:0', - 'block_1/bn/moving_mean:0', - 'block_1/bn/moving_variance:0', - 'block_2/conv/kernel:0', - 'block_2/bn/beta:0', - 'block_2/bn/gamma:0', - 'block_2/bn/moving_mean:0', - 'block_2/bn/moving_variance:0', - 'block_3/conv/kernel:0', - 'block_3/bn/beta:0', - 'block_3/bn/gamma:0', - 'block_3/bn/moving_mean:0', - 'block_3/bn/moving_variance:0', - 
'block_4/conv/kernel:0', - 'block_4/bn/beta:0', - 'block_4/bn/gamma:0', - 'block_4/bn/moving_mean:0', - 'block_4/bn/moving_variance:0', - 'block_5/conv/kernel:0', - 'block_5/bn/beta:0', - 'block_5/bn/gamma:0', - 'block_5/bn/moving_mean:0', - 'block_5/bn/moving_variance:0', - 'block_6/conv/kernel:0', - 'block_6/bn/beta:0', - 'block_6/bn/gamma:0', - 'block_6/bn/moving_mean:0', - 'block_6/bn/moving_variance:0', - 'block_7/conv/kernel:0', - 'block_7/bn/beta:0', - 'block_7/bn/gamma:0', - 'block_7/bn/moving_mean:0', - 'block_7/bn/moving_variance:0', - 'block_8/conv/kernel:0', - 'block_8/bn/beta:0', - 'block_8/bn/gamma:0', - 'block_8/bn/moving_mean:0', - 'block_8/bn/moving_variance:0', - 'block_9/conv/kernel:0', - 'block_9/bn/beta:0', - 'block_9/bn/gamma:0', - 'block_9/bn/moving_mean:0', - 'block_9/bn/moving_variance:0', - 'block_10/conv/kernel:0', - 'block_10/bn/beta:0', - 'block_10/bn/gamma:0', - 'block_10/bn/moving_mean:0', - 'block_10/bn/moving_variance:0', - 'block_11/conv/kernel:0', - 'block_11/bn/beta:0', - 'block_11/bn/gamma:0', - 'block_11/bn/moving_mean:0', - 'block_11/bn/moving_variance:0', - 'block_12/conv/kernel:0', - 'block_12/bn/beta:0', - 'block_12/bn/gamma:0', - 'block_12/bn/moving_mean:0', - 'block_12/bn/moving_variance:0', - 'block_13/conv/kernel:0', - 'block_13/bn/beta:0', - 'block_13/bn/gamma:0', - 'block_13/bn/moving_mean:0', - 'block_13/bn/moving_variance:0', - 'block_14/conv/kernel:0', - 'block_14/bn/beta:0', - 'block_14/bn/gamma:0', - 'block_14/bn/moving_mean:0', - 'block_14/bn/moving_variance:0', - 'block_15/conv/kernel:0', - 'block_15/bn/beta:0', - 'block_15/bn/gamma:0', - 'block_15/bn/moving_mean:0', - 'block_15/bn/moving_variance:0', - 'block_16/conv/kernel:0', - 'block_16/bn/beta:0', - 'block_16/bn/gamma:0', - 'block_16/bn/moving_mean:0', - 'block_16/bn/moving_variance:0', - 'block_17/conv/kernel:0', - 'block_17/bn/beta:0', - 'block_17/bn/gamma:0', - 'block_17/bn/moving_mean:0', - 'block_17/bn/moving_variance:0', - 'block_18/conv/kernel:0', - 'block_18/bn/beta:0', - 'block_18/bn/gamma:0', - 'block_18/bn/moving_mean:0', - 'block_18/bn/moving_variance:0', - 'block_19/conv/kernel:0', - 'block_19/bn/beta:0', - 'block_19/bn/gamma:0', - 'block_19/bn/moving_mean:0', - 'block_19/bn/moving_variance:0', - 'block_20/conv/kernel:0', - 'block_20/bn/beta:0', - 'block_20/bn/gamma:0', - 'block_20/bn/moving_mean:0', - 'block_20/bn/moving_variance:0', - 'block_21/conv/kernel:0', - 'block_21/bn/beta:0', - 'block_21/bn/gamma:0', - 'block_21/bn/moving_mean:0', - 'block_21/bn/moving_variance:0', - 'block_22/conv/kernel:0', - 'block_22/bn/beta:0', - 'block_22/bn/gamma:0', - 'block_22/bn/moving_mean:0', - 'block_22/bn/moving_variance:0', - - # 'conv_23/kernel:0', - # 'conv_23/bias:0', - -] -PRETRAIN_DIR = "saved/convert_weight_from_darknet/yolo_v2/checkpoints" -PRETRAIN_FILE = "save.ckpt" - -NETWORK = EasyDict() -NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer -NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9, "learning_rate": 1e-5} -# NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant -# In the origianl yolov2 Paper, with a starting learning rate of 10−3, dividing it by 10 at 60 and 90 epochs. 
-# Train data num per epoch is 16551 -# NETWORK.LEARNING_RATE_KWARGS = { -# "values": [1e-4, 1e-4], -# "boundaries": [10000], -# } -NETWORK.IMAGE_SIZE = IMAGE_SIZE -NETWORK.BATCH_SIZE = BATCH_SIZE -NETWORK.DATA_FORMAT = DATA_FORMAT -NETWORK.ANCHORS = anchors -NETWORK.OBJECT_SCALE = 5.0 -NETWORK.NO_OBJECT_SCALE = 1.0 -NETWORK.CLASS_SCALE = 1.0 -NETWORK.COORDINATE_SCALE = 1.0 -NETWORK.LOSS_IOU_THRESHOLD = 0.6 -NETWORK.WEIGHT_DECAY_RATE = 0.0005 -NETWORK.SCORE_THRESHOLD = score_threshold -NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold -NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size -NETWORK.LOSS_WARMUP_STEPS = int(12800 / BATCH_SIZE) - -# dataset -DATASET = EasyDict() -DATASET.BATCH_SIZE = BATCH_SIZE -DATASET.DATA_FORMAT = DATA_FORMAT -DATASET.PRE_PROCESSOR = PRE_PROCESSOR -DATASET.AUGMENTOR = Sequence([ - FlipLeftRight(), - Brightness((0.75, 1.25)), - Color((0.75, 1.25)), - Contrast((0.75, 1.25)), - Hue((-10, 10)), - SSDRandomCrop(min_crop_ratio=0.7), -]) diff --git a/lmnet/configs/core/object_detection/yolo_v2_pascalvoc_2007_2012.py b/lmnet/configs/core/object_detection/yolo_v2_pascalvoc_2007_2012.py deleted file mode 100644 index 174eb4a08..000000000 --- a/lmnet/configs/core/object_detection/yolo_v2_pascalvoc_2007_2012.py +++ /dev/null @@ -1,244 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 The Blueoil Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================= -from easydict import EasyDict -import tensorflow as tf - -from lmnet.common import Tasks -from lmnet.networks.object_detection.yolo_v2 import YoloV2 -from lmnet.datasets.pascalvoc_2007_2012 import Pascalvoc20072012 -from lmnet.data_processor import Sequence -from lmnet.pre_processor import ( - ResizeWithGtBoxes, - DivideBy255, -) -from lmnet.post_processor import ( - FormatYoloV2, - ExcludeLowScoreBox, - NMS, -) -from lmnet.data_augmentor import ( - Brightness, - Color, - Contrast, - FlipLeftRight, - Hue, - SSDRandomCrop, -) - -IS_DEBUG = False - -NETWORK_CLASS = YoloV2 -DATASET_CLASS = Pascalvoc20072012 - -IMAGE_SIZE = [416, 416] -BATCH_SIZE = 8 -DATA_FORMAT = "NHWC" -TASK = Tasks.OBJECT_DETECTION -CLASSES = DATASET_CLASS.classes - -MAX_STEPS = 1000000 -SAVE_CHECKPOINT_STEPS = 50000 -KEEP_CHECKPOINT_MAX = 5 -TEST_STEPS = 10000 -SUMMARISE_STEPS = 1000 - - -# for debug -# IS_DEBUG = True -# SUMMARISE_STEPS = 1 - - -# pretrain -IS_PRETRAIN = True -PRETRAIN_VARS = [ - 'block_1/conv/kernel:0', - 'block_1/bn/beta:0', - 'block_1/bn/gamma:0', - 'block_1/bn/moving_mean:0', - 'block_1/bn/moving_variance:0', - 'block_2/conv/kernel:0', - 'block_2/bn/beta:0', - 'block_2/bn/gamma:0', - 'block_2/bn/moving_mean:0', - 'block_2/bn/moving_variance:0', - 'block_3/conv/kernel:0', - 'block_3/bn/beta:0', - 'block_3/bn/gamma:0', - 'block_3/bn/moving_mean:0', - 'block_3/bn/moving_variance:0', - 'block_4/conv/kernel:0', - 'block_4/bn/beta:0', - 'block_4/bn/gamma:0', - 'block_4/bn/moving_mean:0', - 'block_4/bn/moving_variance:0', - 'block_5/conv/kernel:0', - 'block_5/bn/beta:0', - 'block_5/bn/gamma:0', - 'block_5/bn/moving_mean:0', - 'block_5/bn/moving_variance:0', - 'block_6/conv/kernel:0', - 'block_6/bn/beta:0', - 'block_6/bn/gamma:0', - 'block_6/bn/moving_mean:0', - 'block_6/bn/moving_variance:0', - 'block_7/conv/kernel:0', - 'block_7/bn/beta:0', - 'block_7/bn/gamma:0', - 'block_7/bn/moving_mean:0', - 'block_7/bn/moving_variance:0', - 'block_8/conv/kernel:0', - 'block_8/bn/beta:0', - 'block_8/bn/gamma:0', - 'block_8/bn/moving_mean:0', - 'block_8/bn/moving_variance:0', - 'block_9/conv/kernel:0', - 'block_9/bn/beta:0', - 'block_9/bn/gamma:0', - 'block_9/bn/moving_mean:0', - 'block_9/bn/moving_variance:0', - 'block_10/conv/kernel:0', - 'block_10/bn/beta:0', - 'block_10/bn/gamma:0', - 'block_10/bn/moving_mean:0', - 'block_10/bn/moving_variance:0', - 'block_11/conv/kernel:0', - 'block_11/bn/beta:0', - 'block_11/bn/gamma:0', - 'block_11/bn/moving_mean:0', - 'block_11/bn/moving_variance:0', - 'block_12/conv/kernel:0', - 'block_12/bn/beta:0', - 'block_12/bn/gamma:0', - 'block_12/bn/moving_mean:0', - 'block_12/bn/moving_variance:0', - 'block_13/conv/kernel:0', - 'block_13/bn/beta:0', - 'block_13/bn/gamma:0', - 'block_13/bn/moving_mean:0', - 'block_13/bn/moving_variance:0', - 'block_14/conv/kernel:0', - 'block_14/bn/beta:0', - 'block_14/bn/gamma:0', - 'block_14/bn/moving_mean:0', - 'block_14/bn/moving_variance:0', - 'block_15/conv/kernel:0', - 'block_15/bn/beta:0', - 'block_15/bn/gamma:0', - 'block_15/bn/moving_mean:0', - 'block_15/bn/moving_variance:0', - 'block_16/conv/kernel:0', - 'block_16/bn/beta:0', - 'block_16/bn/gamma:0', - 'block_16/bn/moving_mean:0', - 'block_16/bn/moving_variance:0', - 'block_17/conv/kernel:0', - 'block_17/bn/beta:0', - 'block_17/bn/gamma:0', - 'block_17/bn/moving_mean:0', - 'block_17/bn/moving_variance:0', - 'block_18/conv/kernel:0', - 'block_18/bn/beta:0', - 'block_18/bn/gamma:0', - 'block_18/bn/moving_mean:0', 
- 'block_18/bn/moving_variance:0', - 'block_19/conv/kernel:0', - 'block_19/bn/beta:0', - 'block_19/bn/gamma:0', - 'block_19/bn/moving_mean:0', - 'block_19/bn/moving_variance:0', - 'block_20/conv/kernel:0', - 'block_20/bn/beta:0', - 'block_20/bn/gamma:0', - 'block_20/bn/moving_mean:0', - 'block_20/bn/moving_variance:0', - 'block_21/conv/kernel:0', - 'block_21/bn/beta:0', - 'block_21/bn/gamma:0', - 'block_21/bn/moving_mean:0', - 'block_21/bn/moving_variance:0', - 'block_22/conv/kernel:0', - 'block_22/bn/beta:0', - 'block_22/bn/gamma:0', - 'block_22/bn/moving_mean:0', - 'block_22/bn/moving_variance:0', - - # 'conv_23/kernel:0', - # 'conv_23/bias:0', - -] -PRETRAIN_DIR = "saved/convert_weight_from_darknet/yolo_v2/checkpoints" -PRETRAIN_FILE = "save.ckpt" - -PRE_PROCESSOR = Sequence([ - ResizeWithGtBoxes(size=IMAGE_SIZE), - DivideBy255() -]) -anchors = [ - (1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071) -] -score_threshold = 0.05 -nms_iou_threshold = 0.5 -nms_max_output_size = 100 -POST_PROCESSOR = Sequence([ - FormatYoloV2( - image_size=IMAGE_SIZE, - classes=CLASSES, - anchors=anchors, - data_format=DATA_FORMAT, - ), - ExcludeLowScoreBox(threshold=score_threshold), - NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,), -]) - -NETWORK = EasyDict() -NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer -NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9, "learning_rate": 1e-4} -# NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant -# In the origianl yolov2 Paper, with a starting learning rate of 10−3, dividing it by 10 at 60 and 90 epochs. -# Train data num per epoch is 16551 -# NETWORK.LEARNING_RATE_KWARGS = { -# "values": [1e-4, 1e-4], -# "boundaries": [10000], -# } -NETWORK.IMAGE_SIZE = IMAGE_SIZE -NETWORK.BATCH_SIZE = BATCH_SIZE -NETWORK.DATA_FORMAT = DATA_FORMAT -NETWORK.ANCHORS = anchors -NETWORK.OBJECT_SCALE = 5.0 -NETWORK.NO_OBJECT_SCALE = 1.0 -NETWORK.CLASS_SCALE = 1.0 -NETWORK.COORDINATE_SCALE = 1.0 -NETWORK.LOSS_IOU_THRESHOLD = 0.6 -NETWORK.WEIGHT_DECAY_RATE = 0.0005 -NETWORK.SCORE_THRESHOLD = score_threshold -NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold -NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size -NETWORK.LOSS_WARMUP_STEPS = int(12800 / BATCH_SIZE) - -# dataset -DATASET = EasyDict() -DATASET.BATCH_SIZE = BATCH_SIZE -DATASET.DATA_FORMAT = DATA_FORMAT -DATASET.PRE_PROCESSOR = PRE_PROCESSOR -DATASET.AUGMENTOR = Sequence([ - FlipLeftRight(), - Brightness((0.75, 1.25)), - Color((0.75, 1.25)), - Contrast((0.75, 1.25)), - Hue((-10, 10)), - SSDRandomCrop(min_crop_ratio=0.7), -]) -DATASET.ENABLE_PREFETCH = True From fc30405913ee769ea74b4228351affd05c65979e Mon Sep 17 00:00:00 2001 From: Kentaro Iizuka Date: Mon, 3 Feb 2020 16:23:52 +0900 Subject: [PATCH 2/3] Remove convert_weight_from_darknet related scripts --- lmnet/README.md | 29 --- lmnet/executor/convert_weight_from_darknet.py | 173 ------------------ lmnet/tests/test_configs.py | 10 - lmnet/third_party/tensorflow-on-arm | 1 + 4 files changed, 1 insertion(+), 212 deletions(-) delete mode 100644 lmnet/executor/convert_weight_from_darknet.py create mode 160000 lmnet/third_party/tensorflow-on-arm diff --git a/lmnet/README.md b/lmnet/README.md index 676123795..b9dcf0f7f 100644 --- a/lmnet/README.md +++ b/lmnet/README.md @@ -15,7 +15,6 @@ This project is developed as one part of Blueoil project -- the deep learning mo * Utils * `measure_latency.py`: entry point script for measuring inference latency. 
- * `convert_weight_from_darknet.py`: entry point script for convert weight format form darknet framework. - - - @@ -343,34 +342,6 @@ e.g. `PYTHONPATH=. python executor/predict.py -in ./dataset/images -o ./outputs -i lmnet_cifar10` -# Convert weight from darknet -Weight converter form darknet framework to tensorflow checkpoints file. -You can convert [Yolov2](https://pjreddie.com/darknet/yolov2/) and [Darknet19](https://pjreddie.com/darknet/imagenet/#darknet19_448) network weights. - -Please download darknet weights at `inputs` dir. -``` -cd inputs -wget http://pjreddie.com/media/files/darknet19_448.weights -wget https://pjreddie.com/media/files/yolo-voc.weights -``` - -After execute `convert_weight_from_darknet.py`, You can get checkpoints file on -* darknet19: `${OUTPUT_DIR}/convert_weight_from_darknet/darknet19/checkpoints/save.ckpt` -* yolov2: `${OUTPUT_DIR}/convert_weight_from_darknet/yolo_v2/checkpoints/save.ckpt` - -``` -# PYTHONPATH=. python executor/convert_weight_from_darknet.py -h -Usage: convert_weight_from_darknet.py [OPTIONS] - -Options: - -m, --model [yolov2|darknet19] yolo2 or darknet19 [required] - -h, --help Show this message and exit. -``` - -e.g. -`PYTHONPATH=. python executor/convert_weight_from_darknet.py -m yolov2` - - # Profiling model Profiling a trained model. diff --git a/lmnet/executor/convert_weight_from_darknet.py b/lmnet/executor/convert_weight_from_darknet.py deleted file mode 100644 index afba40b36..000000000 --- a/lmnet/executor/convert_weight_from_darknet.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 The Blueoil Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================= -import os -import struct - -import click -import numpy as np -import tensorflow as tf - -from lmnet import environment -from lmnet.networks.classification.darknet import Darknet -from lmnet.networks.object_detection.yolo_v2 import YoloV2 -from lmnet.utils import config as config_util -from lmnet.utils import executor - - -def convert(config, weight_file): - ModelClass = config.NETWORK_CLASS - network_kwargs = dict((key.lower(), val) for key, val in config.NETWORK.items()) - - graph = tf.Graph() - with graph.as_default(): - - if ModelClass is YoloV2: - classes = list(range(0, 20)) - - elif ModelClass is Darknet: - classes = list(range(0, 1000)) - - model = ModelClass( - classes=classes, - is_debug=True, - **network_kwargs, - ) - global_step = tf.Variable(0, name="global_step", trainable=False) # NOQA - - is_training = tf.constant(False, name="is_training") - - images_placeholder, labels_placeholder = model.placeholders() - - model.inference(images_placeholder, is_training) - - init_op = tf.global_variables_initializer() - - saver = tf.compat.v1.train.Saver(max_to_keep=None) - - variables = tf.global_variables() - - session_config = None - sess = tf.Session(graph=graph, config=session_config) - sess.run([init_op, ]) - suffixes = ['bias', 'beta', 'gamma', 'moving_mean', 'moving_variance', 'kernel'] - convert_variables = [] - for var in variables: - if var.op.name == 'global_step': - continue - convert_variables.append(var) - - def sort_key(var): - block_number = var.op.name.split("/")[0].split("_")[1] - for i, suffix in enumerate(suffixes): - if var.op.name.endswith(suffix): - return int(block_number) * 5 + i - - convert_variables.sort(key=sort_key) - for var in convert_variables: - print(var.op.name) - - with open(weight_file, 'rb') as fopen: - major, minor, revision, seen = struct.unpack('4i', fopen.read(16)) - print("major = %d, minor = %d, revision = %d, seen = %d" % (major, minor, revision, seen)) - - # You can only use the version of darknet. - assert major == 0 - assert minor == 1 - - total = 0 - for var in convert_variables: - remaining = os.fstat(fopen.fileno()).st_size - fopen.tell() - print("processing layer {}".format(var.op.name)) - print("remaining: {} bytes.".format(remaining)) - - shape = var.get_shape().as_list() - cnt = np.multiply.reduce(shape) - total += cnt - print("{}: shape: {}. 
num elements: {}".format(var.op.name, str(shape), cnt)) - data = struct.unpack('%df' % cnt, fopen.read(4 * cnt)) - data = np.array(data, dtype=np.float32) - if "kernel" in var.op.name: - kernel_size_1, kernel_size_2, input_channel, output_channel = shape - data = data.reshape([output_channel, input_channel, kernel_size_1, kernel_size_2]) - data = np.transpose(data, [2, 3, 1, 0]) - - # if yolov2 last layer - if "conv_23" in var.op.name: - num_anchors = 5 - if "kernel" in var.op.name: - weights = data.reshape([kernel_size_1, kernel_size_2, input_channel, num_anchors, -1]) - boxes = weights[:, :, :, :, 0:4] - conf = np.expand_dims(weights[:, :, :, :, 4], -1) - classes = weights[:, :, :, :, 5:] - data = np.concatenate([classes, conf, boxes], -1) - data = data.reshape([kernel_size_1, kernel_size_2, input_channel, output_channel]) - - if "bias" in var.op.name: - biases = data.reshape([num_anchors, -1]) - boxes = biases[:, 0:4] - conf = np.expand_dims(biases[:, 4], -1) - classes = biases[:, 5:] - data = np.concatenate([classes, conf, boxes, ], -1).reshape([-1]) - - sess.run(var.assign(data)) - print("total: {} elements".format(total)) - - print("") - print("{} elements assigned".format(total)) - remaining = os.fstat(fopen.fileno()).st_size - fopen.tell() - print("remaining: {}".format(remaining)) - assert remaining == 0 - - checkpoint_file = "save.ckpt" - saver.save(sess, os.path.join(environment.CHECKPOINTS_DIR, checkpoint_file)) - - print("-------- output --------") - print("save checkpoint to : {}".format(os.path.join(environment.CHECKPOINTS_DIR, checkpoint_file))) - - -@click.command(context_settings=dict(help_option_names=['-h', '--help'])) -@click.option( - "-m", - "--model", - help="yolo2 or darknet19", - type=click.Choice(["yolov2", "darknet19"]), - required=True, -) -def main(model): - if model == "yolov2": - weight_file = 'inputs/yolo-voc.weights' - experiment_id = "convert_weight_from_darknet/yolo_v2" - config_file = "configs/convert_weight_from_darknet/yolo_v2.py" - - if model == "darknet19": - weight_file = 'inputs/darknet19_448.weights' - experiment_id = "convert_weight_from_darknet/darknet19" - config_file = "configs/convert_weight_from_darknet/darknet19.py" - - recreate = True - environment.init(experiment_id) - executor.prepare_dirs(recreate) - - config = config_util.load(config_file) - config_util.display(config) - - config_util.copy_to_experiment_dir(config_file) - - convert(config, weight_file) - - -if __name__ == '__main__': - main() diff --git a/lmnet/tests/test_configs.py b/lmnet/tests/test_configs.py index 435b4a9d3..705672593 100644 --- a/lmnet/tests/test_configs.py +++ b/lmnet/tests/test_configs.py @@ -34,15 +34,6 @@ def test_core_configs(): check_config(config, "inference") -def test_convert_weight_from_darknet_configs(): - """Test that all config files in `configs/convert_weight_from_darknet` dir include requirement keys.""" - dir_path = os.path.join("configs", "convert_weight_from_darknet") - - for config_file in glob.glob(os.path.join(dir_path, "**", "*.py"), recursive=True): - config = _load_py(config_file) - check_config(config, "inference") - - def test_example_config(): """Test that example config python file include requirement keys.""" @@ -111,7 +102,6 @@ def test_example_object_detection_config_yaml(): if __name__ == '__main__': test_core_configs() - test_convert_weight_from_darknet_configs() test_example_config() test_example_classification_config_yaml() test_example_object_detection_config_yaml() diff --git a/lmnet/third_party/tensorflow-on-arm 
b/lmnet/third_party/tensorflow-on-arm new file mode 160000 index 000000000..1d023b9db --- /dev/null +++ b/lmnet/third_party/tensorflow-on-arm @@ -0,0 +1 @@ +Subproject commit 1d023b9db8a6d893022e838a186b69db4b074f18 From 94cc6a39432bbea1c5bf1d8271339dc2da493a76 Mon Sep 17 00:00:00 2001 From: Kentaro Iizuka Date: Mon, 3 Feb 2020 17:01:14 +0900 Subject: [PATCH 3/3] revert submodule --- lmnet/third_party/tensorflow-on-arm | 1 - 1 file changed, 1 deletion(-) delete mode 160000 lmnet/third_party/tensorflow-on-arm diff --git a/lmnet/third_party/tensorflow-on-arm b/lmnet/third_party/tensorflow-on-arm deleted file mode 160000 index 1d023b9db..000000000 --- a/lmnet/third_party/tensorflow-on-arm +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1d023b9db8a6d893022e838a186b69db4b074f18
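For anyone who still needs the conversion path removed by this series: the deleted `convert_weight_from_darknet.py` parsed the darknet `.weights` binary as a 16-byte header of four int32 values (major, minor, revision, seen) followed by raw float32 parameters, and reordered each conv kernel from darknet's `[out, in, kh, kw]` layout into TensorFlow's `[kh, kw, in, out]`. Below is a minimal sketch of just those two pieces, based on the removed script; the function names and shapes are illustrative only, and a full reader must also consume the per-layer batch-norm parameters in the variable order the script's `sort_key` enforced.

```python
# Illustrative sketch (not the removed executor script): darknet .weights header
# parsing and conv-kernel layout transposition, mirroring the logic that
# convert_weight_from_darknet.py performed. Names and shapes are placeholders.
import struct

import numpy as np


def read_darknet_header(fopen):
    """Return (major, minor, revision, seen) from the 16-byte darknet header."""
    major, minor, revision, seen = struct.unpack("4i", fopen.read(16))
    # Same version check as the removed script.
    assert major == 0 and minor == 1
    return major, minor, revision, seen


def darknet_kernel_to_tf(flat_weights, tf_shape):
    """Reorder a flat darknet conv kernel into TensorFlow's [kh, kw, in, out] layout.

    Darknet stores the same values as [out, in, kh, kw]; this mirrors the reshape
    and transpose applied to every 'kernel' variable in the removed script.
    """
    kh, kw, in_ch, out_ch = tf_shape
    data = np.asarray(flat_weights, dtype=np.float32).reshape([out_ch, in_ch, kh, kw])
    return np.transpose(data, [2, 3, 1, 0])
```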