This repository has been archived by the owner on Dec 1, 2021. It is now read-only.

Fix is_scalar #869

Merged 4 commits on Mar 2, 2020
Changes from 3 commits
12 changes: 6 additions & 6 deletions blueoil/converter/core/operators.py
@@ -337,7 +337,7 @@ def size(self) -> int:
         """Get the whole size of the output data."""
         import operator
         pred = functools.partial(functools.reduce, operator.mul)
-        return int(pred(self._shape)) # type: ignore
+        return int(pred(self._shape, 1)) # type: ignore

     @property
     def is_variable(self) -> bool:
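The added initial value matters because a rank-0 tensor has an empty shape tuple, and functools.reduce over an empty sequence with no initial value raises TypeError. A minimal sketch of the behaviour (plain Python, not code from the repository):

import functools
import operator

size_of = functools.partial(functools.reduce, operator.mul)

print(size_of((2, 3, 4), 1))  # 24 -- unchanged for ordinary shapes
print(size_of((), 1))         # 1  -- a rank-0 (scalar) tensor now reports size 1
# size_of(())                 # would raise TypeError: empty sequence with no initial value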
@@ -346,8 +346,8 @@ def is_variable(self) -> bool:

     @property
     def is_scalar(self) -> bool:
-        """Return if this node is a scalar node (i.e. `size == 1`)."""
-        return self.size == 1
+        """Return if this node is a scalar node (i.e. `ndim == 0`)."""
+        return self.ndim == 0

     @property
     def height(self) -> int:
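The switch from size to ndim changes which constants count as scalars: a rank-1 array with a single element has size 1 but is not rank 0. A short NumPy sketch of the distinction (assuming the operator's ndim mirrors NumPy's):

import numpy as np

scalar = np.asarray(3)      # shape (),  ndim 0, size 1
one_elem = np.asarray([3])  # shape (1,), ndim 1, size 1

print(scalar.size == 1, one_elem.size == 1)  # True True  -- old check matched both
print(scalar.ndim == 0, one_elem.ndim == 0)  # True False -- new check matches only the true scalar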
@@ -1444,11 +1444,11 @@ def run_forward(self) -> np.ndarray:

     @property
     def nbit(self) -> int:
-        return self._input_ops['Y'].data[0]
+        return np.asscalar(self._input_ops['Y'].data)

     @property
     def max_v(self) -> float:
-        return self._input_ops['Z'].data[0]
+        return np.asscalar(self._input_ops['Z'].data)
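Given the tf.py changes later in this diff, these constant inputs are now rank-0 arrays, and indexing a 0-d array with [0] raises IndexError; np.asscalar extracts the Python value from a 0-d (or any size-1) array instead. The Split operator's axis in the next hunk gets the same conversion. A small sketch (np.asscalar was current in 2020; newer NumPy spells it .item()):

import numpy as np

nbit = np.asarray(2)      # rank-0 constant, as now produced by the converter
# nbit[0]                 # IndexError: a 0-d array cannot be indexed with [0]
print(np.asscalar(nbit))  # 2
print(nbit.item())        # 2 -- equivalent, non-deprecated spelling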
@@ -2683,7 +2683,7 @@ def __init__(self,
                  num_split: int = 1) -> None:
         """Init the split operator."""
         self._split = num_split
-        self._axis = input_ops['A'].data[0]
+        self._axis = np.asscalar(input_ops['A'].data)
         super().__init__(name, shape, dtype, input_ops, dimension_format=dimension_format)

     def _check_consistency(self) -> None:
1 change: 0 additions & 1 deletion blueoil/converter/generate_project.py
@@ -72,7 +72,6 @@ def optimize_graph_step(graph: Graph, config: Config) -> None:
     pass_constant_folding(graph)
     pass_simplify_batchnorm(graph)

-
 def generate_code_step(graph: Graph, config: Config) -> None:
     """Generate code for the model.

12 changes: 5 additions & 7 deletions blueoil/converter/plugins/tf.py
@@ -273,16 +273,16 @@ def get_data(self):
         else:
             dtype = type(self)._TF_TO_NP[self.tensor.dtype]
             if self.tensor.dtype == 3:
-                return np.asarray(self.tensor.int_val, dtype=dtype)
+                return np.asarray(self.tensor.int_val, dtype=dtype).reshape(self.get_shape())
             if self.tensor.dtype == 1:
-                return np.asarray(self.tensor.float_val, dtype=dtype)
+                return np.asarray(self.tensor.float_val, dtype=dtype).reshape(self.get_shape())

     def get_shape(self) -> List[str]:
         """Get shape info."""
         if self.is_placeholder:
             return [d.size for d in self.in_.attr.get('shape').shape.dim]
         else:
-            return [d.size for d in self.tensor.tensor_shape.dim] or [self.get_data().size]
+            return [d.size for d in self.tensor.tensor_shape.dim]

     def set_shape(self, val: List[str]) -> None:
         """Set shape info."""
@@ -467,7 +467,7 @@ def _get_format(self, node: Any, output_format: str) -> Tuple[str, List[str]]:
         _default_format = 'NHWC'
         _default_w_format = 'HWIO'

-        rank_to_format = {1: 'C', 2: 'HW', 3: 'HWC', 4: 'NHWC', 5: 'NHWCT'}
+        rank_to_format = {0: 'Atom', 1: 'C', 2: 'HW', 3: 'HWC', 4: 'NHWC', 5: 'NHWCT'}

         def guess_node_format(input_node: Any) -> str:
             """Provide the node format from references
@@ -477,8 +477,6 @@ def guess_node_format(input_node: Any) -> str:
             the format follows 'C', 'HW', and 'HWC' respectively of rank 1, 2, 3.
             Note: Ensure the tf node always has valid value of attribute _output_shape defined.
             """
-            assert len(input_node.get_shape()) != 0, \
-                f'output shape of {input_node.name} of {input_node.op_type} is not properly defined in .pb file'
             node_rank = len(input_node.get_shape())
             return out_format if node_rank == len(out_format) else rank_to_format[node_rank]

@@ -494,7 +492,7 @@ def guess_node_format(input_node: Any) -> str:
         elif op_type in ['BinaryMeanScalingQuantizer', 'BinaryChannelWiseMeanScalingQuantizer']:
             return _default_w_format, [_default_w_format]
         elif op_type in {'QTZ_linear_mid_tread_half'}:
-            return out_format, [out_format, 'C', 'C']
+            return out_format, [out_format, 'Atom', 'Atom']
         elif op_type == 'Pad':
             return out_format, [out_format, 'Padding']
         elif op_type == 'Transpose':
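The rank-0 'Atom' entry added above is what replaces the removed assertion: a scalar input now resolves to the 'Atom' layout instead of aborting the conversion, and the same label is used for QTZ_linear_mid_tread_half's scalar nbit/max_v inputs in the hunk just above. A simplified stand-in for the dispatch (not the converter's exact code path):

rank_to_format = {0: 'Atom', 1: 'C', 2: 'HW', 3: 'HWC', 4: 'NHWC', 5: 'NHWCT'}

def guess_format(shape, out_format='NHWC'):
    # Fall back to the rank table whenever the rank does not match the output format.
    rank = len(shape)
    return out_format if rank == len(out_format) else rank_to_format[rank]

print(guess_format(()))            # 'Atom' -- scalar constants no longer trip an assert
print(guess_format((3,)))          # 'C'
print(guess_format((1, 8, 8, 3)))  # 'NHWC'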
11 changes: 0 additions & 11 deletions blueoil/converter/templates/manual/consts/input.tpl.cpp
@@ -17,15 +17,6 @@ limitations under the License.
 #include "tensor_view.h"
 #include "inputs/{{ node.name }}.h"

-{% if node.is_scalar -%}
-
-static {{ node.dtype.cpptype() }} {{ node.name }}_data = {{ node.data[0] }};
-static constexpr decltype({{ node.name }}_output)::tensor_info_t<std::size_t> {{ node.name }}_shape;
-const TensorView<{{ node.dtype.cpptype() }}, MemoryLayout::Atom>
-    {{ node.name }}_output(&{{ node.name }}_data, {{ node.name }}_shape);
-
-{% else -%}
-
 {% if node.transposed_data %}

 #ifdef RUN_ON_FPGA
@@ -89,5 +80,3 @@ const TensorView<{{ node.dtype.cpptype() }}, MemoryLayout::{{ node.dimension }}>
     {{ node.name }}_shape);

 {% endif %}
-
-{%- endif %}
6 changes: 1 addition & 5 deletions blueoil/converter/templates/manual/consts/input.tpl.h
@@ -18,11 +18,7 @@ limitations under the License.

 #include "global.h"

-{% if node.is_scalar -%}
-
-extern const TensorView<{{ node.dtype.cpptype() }}, MemoryLayout::Atom> {{ node.name }}_output;
-
-{% elif node.transposed_data -%}
+{% if node.transposed_data -%}

 #ifdef RUN_ON_FPGA
 extern const TensorView<{{ node.dtype.cpptype() }}, MemoryLayout::{{ node.transposed_dimension_format }}> {{ node.name }}_output;