Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

elems op, rdiv/rsub, unary, set value #42

Merged
merged 7 commits into from
Sep 6, 2015
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 32 additions & 37 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,6 @@ ifndef DMLC_CORE
DMLC_CORE = dmlc-core
endif

ifndef RABIT
RABIT = rabit
endif

# use customized config file
include $(config)
Expand Down Expand Up @@ -65,15 +62,14 @@ endif
ENGINE=naive_engine.o
BIN = tests/test_simple_engine
OBJ = narray_function_cpu.o
OBJCXX11 = batch_norm_cpu.o reshape_cpu.o narray.o c_api.o operator.o symbol.o storage.o fully_connected_cpu.o static_graph.o activation_cpu.o graph_executor.o softmax_cpu.o elementwise_sum_cpu.o pooling_cpu.o convolution_cpu.o io.o iter_mnist.o $(ENGINE)
CUOBJ =
OBJCXX11 = narray.o c_api.o operator.o symbol.o storage.o static_graph.o graph_executor.o io.o iter_mnist.o $(ENGINE)
CUOBJ = narray_function_gpu.o
SLIB = lib/libmxnet.so
ALIB = lib/libmxnet.a
LIB_DEP = $(DMLC_CORE)/libdmlc.a

ifeq ($(USE_CUDA), 1)
CUOBJ += batch_norm_gpu.o reshape_gpu.o narray_function_gpu.o fully_connected_gpu.o activation_gpu.o elementwise_sum_gpu.o pooling_gpu.o softmax_gpu.o convolution_gpu.o
endif
ALL_DEP = $(OBJ) $(OBJCXX11) $(LIB_DEP)
# common headers; changing them will result in a rebuild of all files
COMMON_HEADERS=include/mxnet/*.h src/common/*.h

.PHONY: clean all test lint doc

Expand All @@ -85,47 +81,47 @@ $(DMLC_CORE)/libdmlc.a:
storage.o: src/storage/storage.cc
naive_engine.o: src/dag_engine/naive_engine.cc
dag_engine.o: src/dag_engine/dag_engine.cc
simple_engine.o: src/dag_engine/simple_engine.cc
simple_engine.o: src/dag_engine/simple_engine.cc
narray.o: src/narray/narray.cc
narray_function_cpu.o: src/narray/narray_function.cc src/narray/narray_function-inl.h
narray_function_gpu.o: src/narray/narray_function.cu src/narray/narray_function-inl.h
symbol.o: src/symbol/symbol.cc
graph_executor.o: src/symbol/graph_executor.cc
static_graph.o : src/symbol/static_graph.cc
symbol.o: src/symbol/symbol.cc src/symbol/*.h
graph_executor.o: src/symbol/graph_executor.cc src/symbol/*.h
static_graph.o : src/symbol/static_graph.cc src/symbol/*.h
operator.o: src/operator/operator.cc
c_api.o: src/c_api.cc
fully_connected_cpu.o: src/operator/fully_connected.cc
fully_connected_gpu.o: src/operator/fully_connected.cu
activation_cpu.o: src/operator/activation.cc
activation_gpu.o: src/operator/activation.cu
elementwise_sum_cpu.o: src/operator/elementwise_sum.cc
elementwise_sum_gpu.o: src/operator/elementwise_sum.cu
pooling_cpu.o: src/operator/pooling.cc
pooling_gpu.o: src/operator/pooling.cu
softmax_cpu.o: src/operator/softmax.cc
softmax_gpu.o: src/operator/softmax.cu
convolution_cpu.o: src/operator/convolution.cc
convolution_gpu.o: src/operator/convolution.cu
reshape_cpu.o: src/operator/reshape.cc
reshape_gpu.o: src/operator/reshape.cu
batch_norm_cpu.o: src/operator/batch_norm.cc
batch_norm_gpu.o: src/operator/batch_norm.cu
io.o: src/io/io.cc
iter_mnist.o: src/io/iter_mnist.cc
iter_mnist.o: src/io/iter_mnist.cc src/io/*.h

lib/libmxnet.a: $(OBJ) $(OBJCXX11) $(CUOBJ) $(LIB_DEP)
lib/libmxnet.so: $(OBJ) $(OBJCXX11) $(CUOBJ) $(LIB_DEP)
# Rules for operators
OPERATOR_HDR=$(wildcard src/operator/*-inl.h)
OPERATOR_OBJ=$(patsubst %-inl.h, %_cpu.o, $(OPERATOR_HDR))
OPERATOR_CUOBJ=$(patsubst %-inl.h, %_gpu.o, $(OPERATOR_HDR))

ALL_DEP += $(OPERATOR_OBJ)
ifeq ($(USE_CUDA), 1)
ALL_DEP += $(OPERATOR_CUOBJ) $(CUOBJ)
endif

src/operator/%_cpu.o : src/operator/%.cc src/operator/%-inl.h src/operator/mshadow_op.h src/operator/operator_common.h $(COMMON_HEADERS)
$(CXX) -std=c++0x -c $(CFLAGS) -o $@ $(filter %.cpp %.c %.cc, $^)

src/operator/%_gpu.o : src/operator/%.cu src/operator/%-inl.h src/operator/operator_common.h src/operator/mshadow_op.h $(COMMON_HEADERS)
$(NVCC) -c -o $@ $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" $(filter %.cu, $^)

lib/libmxnet.a: $(ALL_DEP)
lib/libmxnet.so: $(ALL_DEP)

tests/test_storage: tests/test_storage.cc lib/libmxnet.a
tests/test_simple_engine: tests/test_simple_engine.cc lib/libmxnet.a

$(BIN) :
$(CXX) $(CFLAGS) -std=c++0x -o $@ $(filter %.cpp %.o %.c %.a %.cc, $^) $(LDFLAGS)

$(OBJ) :
$(OBJ) : $(COMMON_HEADERS)
$(CXX) -c $(CFLAGS) -o $@ $(filter %.cpp %.c %.cc, $^)

$(OBJCXX11) :
$(OBJCXX11) : $(COMMON_HEADERS)
$(CXX) -std=c++0x -c $(CFLAGS) -o $@ $(filter %.cpp %.c %.cc, $^)

$(SLIB) :
Expand All @@ -134,19 +130,18 @@ $(SLIB) :
$(ALIB): $(OBJ) $(OBJCXX11)
ar cr $@ $+

$(CUOBJ) :
$(CUOBJ) :$(COMMON_HEADERS)
$(NVCC) -c -o $@ $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" $(filter %.cu, $^)

$(CUBIN) :
$(NVCC) -o $@ $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" -Xlinker "$(LDFLAGS)" $(filter %.cu %.cpp %.o, $^)


lint:
python dmlc-core/scripts/lint.py mxnet ${LINT_LANG} include src scripts test python

doxygen:
doxygen doc/Doxyfile

clean:
$(RM) $(OBJ) $(OBJCXX11) $(BIN) $(CUBIN) $(CUOBJ) $(SLIB) $(ALIB) *~ */*~ */*/*~ */*/*/*~
$(RM) $(ALL_DEP) $(SLIB) $(ALIB) *~ */*~ */*/*~ */*/*/*~
cd $(DMLC_CORE); make clean; cd -
20 changes: 19 additions & 1 deletion include/mxnet/narray.h
Original file line number Diff line number Diff line change
Expand Up @@ -269,8 +269,9 @@ class NArray {
friend void BinaryOp(const NArray &lhs, const NArray &rhs, NArray *out);
template<typename OP>
friend void UnaryOp(const NArray &lhs, const NArray &rhs, NArray *out);
template<typename OP>
template<typename OP, bool reverse>
friend void ScalarOp(const NArray &lhs, const real_t &rhs, NArray *out);
friend void SetValueOp(const real_t &rhs, NArray *out);
};

/*!
Expand Down Expand Up @@ -385,6 +386,23 @@ struct NArrayFunctionReg
num_mutate_vars(0),
num_scalars(0),
type_mask(0) {}
/*!
* \brief set the function body to a NArray setvalue function
* this will also auto set the parameters correctly
* \param fsetvalue function body to set
* \return ref to the registered entry, used to set properties
*/
inline NArrayFunctionReg &set_function(void fsetvalue(const real_t &rhs,
NArray *out)) {
body = [fsetvalue] (NArray **used_vars,
real_t *s, NArray **mutate_vars) {
fsetvalue(s[0], mutate_vars[0]);
};
num_mutate_vars = 1; num_scalars = 1;
// type_mask = kNArrayArgBeforeScalar;
this->add_argument("rhs", "real_t", "Right operand to the function.");
return *this;
}
/*!
* \brief set the function body to a binary NArray function
* this will also auto set the parameters correctly
Expand Down
7 changes: 4 additions & 3 deletions include/mxnet/operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ class Operator {
* \param req the request types of saving operation, can only be kWriteTo or kWriteInplace.
* \param out_data array of output data, pointer is used to indicate that this is holder
* the space of TBlob in out_data must be pre-allocated with InferShape
* \param aux_states Auxiliary states of operator. Normally operator doesn't
* \param aux_states Auxiliary states of operator. Normally operator doesn't
 * need them; special cases like Batch Norm require them.
* \sa OpReqType, OpContext
*/
Expand Down Expand Up @@ -411,11 +411,12 @@ struct OperatorPropertyReg
* \endcode
*/
#define MXNET_REGISTER_OP_PROPERTY(name, OperatorPropertyType) \
static ::mxnet::OperatorProperty* __create__ ## OperatorPropertyType ## __() { \
static ::mxnet::OperatorProperty* __create__ ## OperatorProperty ## name ## __() { \
return new OperatorPropertyType; \
} \
DMLC_REGISTRY_REGISTER(::mxnet::OperatorPropertyReg, OperatorPropertyReg, name) \
.set_body(__create__ ## OperatorPropertyType ## __)
.set_body(__create__ ## OperatorProperty ## name ## __)

#endif // DMLC_USE_CXX11
} // namespace mxnet
#endif // MXNET_OPERATOR_H_
2 changes: 2 additions & 0 deletions python/mxnet/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,13 @@
#----------------------------
if sys.version_info[0] == 3:
string_types = str,
numeric_types = (float, int)
# this function is needed for python3
# to convert ctypes.char_p .value back to python str
py_str = lambda x: x.decode('utf-8')
else:
string_types = basestring,
numeric_types = (float, int, long)
py_str = lambda x: x


Expand Down
Empty file modified python/mxnet/executor.py
100755 → 100644
Empty file.
46 changes: 35 additions & 11 deletions python/mxnet/narray.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,9 @@
from __future__ import absolute_import

import ctypes
import warnings
import sys
from .base import _LIB, string_types
from .base import _LIB, string_types, numeric_types
from .base import c_array, py_str, c_str
from .base import mx_uint, mx_float, NArrayHandle, FunctionHandle
from .base import ctypes2numpy_shared, ctypes2buffer
Expand Down Expand Up @@ -66,15 +67,15 @@ def __del__(self):
def __add__(self, other):
if isinstance(other, NArray):
return NArray._plus(self, other)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._plus_scalar(self, float(other))
else:
raise TypeError('type %s not supported' % str(type(other)))

def __iadd__(self, other):
if isinstance(other, NArray):
return NArray._plus(self, other, out=self)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._plus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
Expand All @@ -85,31 +86,40 @@ def __radd__(self, other):
def __sub__(self, other):
if isinstance(other, NArray):
return NArray._minus(self, other)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._minus_scalar(self, float(other))
else:
raise TypeError('type %s not supported' % str(type(other)))

def __isub__(self, other):
if isinstance(other, NArray):
return NArray._minus(self, other, out=self)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._minus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))

def __rsub__(self, other):
    """Reflected subtraction, i.e. ``other - self`` where ``other`` is a
    Python scalar (int/float) on the left-hand side.

    Delegates to the registered ``_rminus_scalar`` kernel; raises
    TypeError for any non-numeric left operand.
    """
    if not isinstance(other, numeric_types):
        raise TypeError('type %s not supported' % str(type(other)))
    return NArray._rminus_scalar(self, float(other))

def __mul__(self, other):
if isinstance(other, NArray):
return NArray._mul(self, other)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._mul_scalar(self, float(other))
else:
raise TypeError('type %s not supported' % str(type(other)))

def __neg__(self):
    """Unary negation: return a NEW NArray whose elements are ``-self``.

    Bug fix: the original implementation passed ``out=self`` to
    ``_mul_scalar``, which wrote the result back into ``self`` — so
    merely evaluating ``-x`` silently mutated ``x``. Python's unary
    minus operator must not modify its operand; dropping ``out=self``
    lets the kernel allocate a fresh result array instead.
    """
    return NArray._mul_scalar(self, -1.0)

def __imul__(self, other):
if isinstance(other, NArray):
return NArray._mul(self, other, out=self)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._mul_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
Expand All @@ -120,15 +130,21 @@ def __rmul__(self, other):
def __div__(self, other):
if isinstance(other, NArray):
return NArray._div(self, other)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._div_scalar(self, float(other))
else:
raise TypeError('type %s not supported' % str(type(other)))

def __rdiv__(self, other):
    """Reflected division, i.e. ``other / self`` where ``other`` is a
    Python scalar (int/float) on the left-hand side.

    Delegates to the registered ``_rdiv_scalar`` kernel; raises
    TypeError for any non-numeric left operand.
    """
    if not isinstance(other, numeric_types):
        raise TypeError('type %s not supported' % str(type(other)))
    return NArray._rdiv_scalar(self, float(other))

def __idiv__(self, other):
if isinstance(other, NArray):
return NArray._div(self, other, out=self)
elif isinstance(other, float) or isinstance(other, int):
elif isinstance(other, numeric_types):
return NArray._div_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
Expand Down Expand Up @@ -163,9 +179,13 @@ def __setitem__(self, in_slice, value):
"""Set narray value"""
if in_slice.step != None:
raise Exception("Set NArray should use empty index array[:] = target_array")
if isinstance(value, NArray) == False:
if isinstance(value, NArray):
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
return NArray._set_value(float(value), out=self)
else:
raise TypeError('type %s not supported' % str(type(value)))
value.copyto(self)

def __getitem__(self, in_slice):
"""Get narray"""
Expand Down Expand Up @@ -238,6 +258,10 @@ def copyto(self, other):
The copy target NArray
"""
if isinstance(other, NArray):
if other.handle is self.handle:
warnings.warn('copy an array to itself, is it intended?',
RuntimeWarning)
return
return NArray._copyto(self, out=other)
elif isinstance(other, Context):
hret = NArray(_new_alloc_handle(self.shape, other, True))
Expand Down
Loading