Skip to content

Commit

Permalink
feat: LSTM over torch , preliminary internal graph representation
Browse files Browse the repository at this point in the history
preliminary steps
clean dot output
allocation of torch modules + forward
simple forward ut ok
lstm from proto trained by torch ok
csvts torch connector, 1
working learning of csvts with torch backend
gpu ok
make lstm stateful if needed
fix iterations display number
cleanup realloc computations
fix name collision when compiled w/ TORCH and w/caffe
can pass labels either at creation or at train call
add some headers for some versions
remove -liomp if cuda version
fix ambiguous var name
load correct file
do not double init params
load directly models on correct device
torch timeseries prediction ok
fix test wrt correct init
do not regenerate prototxt if already present: allows not to give net definition at predict time
reload params after realloc
load only if weights are present
small changes as for beniz review
changes for sileht and louisj reviews
  • Loading branch information
fantes authored and sileht committed Sep 10, 2020
1 parent 3514ed4 commit 25faa8b
Show file tree
Hide file tree
Showing 32 changed files with 2,977 additions and 236 deletions.
4 changes: 2 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -770,12 +770,12 @@ if (USE_TORCH)
set(TORCH_LIB_DEPS ${TORCH_LOCATION}/lib/libtorch.so ${TORCH_LOCATION}/lib/libtorch_cpu.so ${TORCH_LOCATION}/lib/libc10.so -llmdb -lprotobuf)

if (NOT USE_CPU_ONLY AND CUDA_FOUND)
list(APPEND TORCH_LIB_DEPS ${TORCH_LOCATION}/lib/libc10_cuda.so ${TORCH_LOCATION}/lib/libtorch_cuda.so iomp5)
list(APPEND TORCH_LIB_DEPS ${TORCH_LOCATION}/lib/libc10_cuda.so ${TORCH_LOCATION}/lib/libtorch_cuda.so)
else()
list(APPEND TORCH_LIB_DEPS iomp5)
endif()

set(TORCH_INC_DIR ${TORCH_LOCATION}/include/ ${TORCH_LOCATION}/include/torch/csrc/api/include/ ${CMAKE_BINARY_DIR}/pytorch/src/pytorch/torch/include/torch/csrc/api/include/)
set(TORCH_INC_DIR ${TORCH_LOCATION}/include/ ${TORCH_LOCATION}/include/torch/csrc/api/include/ ${CMAKE_BINARY_DIR}/pytorch/src/pytorch/torch/include/torch/csrc/api/include ${TORCH_LOCATION}/.. ${CMAKE_BINARY_DIR}/src)
set(TORCH_LIB_DIR ${TORCH_LOCATION}/lib/)

if (${PB_GENERATOR_TORCH})
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ Current features include:
- [gflags](https://code.google.com/p/gflags/) for command line parsing;
- OpenCV >= 2.4
- [cppnetlib](http://cpp-netlib.org/)
- Boost
- Boost, Boost::graph
- [curl](http://curl.haxx.se/)
- [curlpp](http://www.curlpp.org/)
- [utfcpp](http://utfcpp.sourceforge.net/)
Expand Down
227 changes: 227 additions & 0 deletions examples/graph/recurrent.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,227 @@
# Caffe-style net definition for an LSTM time-series model
# (used as the proto source for the torch/internal-graph backend).
# Data flow: MemoryData -> Permute (batch/time swap) -> Slice into
# continuation / target / input sequences -> 2 stacked LSTMs ->
# InnerProduct prediction head; TRAIN-only tail computes an L1-style loss
# via Eltwise difference + Reduction + Scale + final Reduction.
name: "recurrent"
# TRAIN-phase input: blobs of shape batch=1, channels=50, height=9, width=1.
# NOTE(review): presumably channels=50 is the timestep axis and height=9 the
# per-step feature count (the Permute below swaps axes 0 and 1) — TODO confirm.
layer {
name: "inputl"
type: "MemoryData"
top: "data"
top: "label"
include {
phase: TRAIN
}
memory_data_param {
batch_size: 1
channels: 50
height: 9
width: 1
}
}
# TEST-phase input: same shape/params as the TRAIN input above.
layer {
name: "inputl"
type: "MemoryData"
top: "data"
top: "label"
include {
phase: TEST
}
memory_data_param {
batch_size: 1
channels: 50
height: 9
width: 1
}
}
# Swap axes 0 and 1 (order 1,0,2,3): puts the sequence axis first,
# as Caffe recurrent layers expect T x N x ... ordering.
layer {
name: "permute_T_N_data"
type: "Permute"
bottom: "data"
top: "permuted_data"
permute_param {
order: 1
order: 0
order: 2
order: 3
}
}
# Split the 9-wide feature axis (axis 2) at points 1 and 4:
#   [0,1)  -> cont_seq_unshaped (1 col, sequence-continuation flags)
#   [1,4)  -> target_seq        (3 cols, regression targets)
#   [4,9)  -> input_seq         (5 cols, model inputs)
layer {
name: "slice_timeseries"
type: "Slice"
bottom: "permuted_data"
top: "cont_seq_unshaped"
top: "target_seq"
top: "input_seq"
slice_param {
slice_point: 1
slice_point: 4
axis: 2
}
}
# Flatten the continuation indicator down to the 2-D (T x N) shape
# required by the LSTM layers' second bottom.
layer {
name: "shape_cont_seq"
type: "Flatten"
bottom: "cont_seq_unshaped"
top: "cont_seq"
flatten_param {
axis: 1
}
}
# First LSTM layer: 50 hidden units; weights and biases initialized
# uniformly in [-0.14142136, 0.14142136] (~= 1/sqrt(50) — TODO confirm intent).
layer {
name: "LSTM0"
type: "LSTM"
bottom: "input_seq"
bottom: "cont_seq"
top: "LSTM_0"
recurrent_param {
num_output: 50
weight_filler {
type: "uniform"
min: -0.14142136
max: 0.14142136
}
bias_filler {
type: "uniform"
min: -0.14142136
max: 0.14142136
}
}
}
# Second stacked LSTM: consumes LSTM_0's output, shares the same
# cont_seq continuation flags and the same initialization range.
layer {
name: "LSTM1"
type: "LSTM"
bottom: "LSTM_0"
bottom: "cont_seq"
top: "LSTM_1"
recurrent_param {
num_output: 50
weight_filler {
type: "uniform"
min: -0.14142136
max: 0.14142136
}
bias_filler {
type: "uniform"
min: -0.14142136
max: 0.14142136
}
}
}
# Prediction head: per-timestep linear projection (axis: 2) from the
# 50-dim LSTM state to 3 outputs, matching target_seq's 3 columns.
layer {
name: "affine_2"
type: "InnerProduct"
bottom: "LSTM_1"
top: "rnn_pred"
inner_product_param {
num_output: 3
weight_filler {
type: "uniform"
min: -0.14142136
max: 0.14142136
}
bias_filler {
type: "uniform"
min: -0.14142136
max: 0.14142136
}
axis: 2
}
}
# --- TRAIN-only loss computation below this point ---
# Permute predictions back from T x N to N x T ordering (order 1,0,2).
layer {
name: "permute_T_N_rnn_pred"
type: "Permute"
bottom: "rnn_pred"
top: "permuted_rnn_pred"
include {
phase: TRAIN
}
permute_param {
order: 1
order: 0
order: 2
}
}
# Same N x T re-permutation for the target sequence.
layer {
name: "permute_T_N_target_seq"
type: "Permute"
bottom: "target_seq"
top: "permuted_target_seq"
include {
phase: TRAIN
}
permute_param {
order: 1
order: 0
order: 2
}
}
# Flatten targets from axis 2 onward so their shape matches the
# predictions for the element-wise difference below.
layer {
name: "Target_Seq_Dim"
type: "Flatten"
bottom: "permuted_target_seq"
top: "permuted_target_seq_flattened"
include {
phase: TRAIN
}
flatten_param {
axis: 2
}
}
# Element-wise signed error: difference = 1*pred + (-1)*target.
layer {
name: "Loss_Sum_Layer"
type: "Eltwise"
bottom: "permuted_rnn_pred"
bottom: "permuted_target_seq_flattened"
top: "difference"
include {
phase: TRAIN
}
eltwise_param {
operation: SUM
coeff: 1
coeff: -1
}
}
# Sum of absolute differences (ASUM) over all axes from 1 on:
# one L1 error value per batch item.
layer {
name: "Loss_Reduction"
type: "Reduction"
bottom: "difference"
top: "summed_difference"
include {
phase: TRAIN
}
reduction_param {
operation: ASUM
axis: 1
}
}
# Multiply the per-item error by a constant-1 scale blob; lr_mult/decay_mult
# are 0 so the scale is frozen (acts as a fixed, non-learned weighting hook).
layer {
name: "Loss_Scale"
type: "Scale"
bottom: "summed_difference"
top: "scaled_difference"
param {
lr_mult: 0
decay_mult: 0
}
include {
phase: TRAIN
}
scale_param {
axis: 0
filler {
type: "constant"
value: 1
}
bias_term: false
}
}
# Final reduction across the batch into the scalar "loss" blob
# (loss_weight: 1 makes it the training objective).
layer {
name: "Loss_Reduction_Batch"
type: "Reduction"
bottom: "scaled_difference"
top: "loss"
loss_weight: 1
include {
phase: TRAIN
}
reduction_param {
operation: SUM
}
}
27 changes: 26 additions & 1 deletion src/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -74,13 +74,38 @@ if (USE_NCNN)
endif()
if (USE_TORCH)
list(APPEND ddetect_SOURCES
generators/net_caffe.cc
generators/net_caffe_recurrent.cc
backends/torch/torchlib.cc
backends/torch/torchmodel.cc
backends/torch/torchinputconns.cc
backends/torch/db.cpp
backends/torch/db_lmdb.cpp
)
basegraph.cc
caffegraphinput.cc
backends/torch/torchgraphbackend.cc
graph.cc
)
if (NOT EXISTS ${CMAKE_SOURCE_DIR}/src/caffe.proto)
file(DOWNLOAD https://raw.githubusercontent.com/jolibrain/caffe/master/src/caffe/proto/caffe.proto ${CMAKE_SOURCE_DIR}/src/caffe.proto)
execute_process(COMMAND LD_LIBRARY_PATH=${PROTOBUF_LIB_DIR}:$ENV{LD_LIBRARY_PATH} ${CMAKE_BINARY_DIR}/pytorch/src/pytorch/torch/bin/protoc
--proto_path=${CMAKE_SOURCE_DIR}/src
--cpp_out=${CMAKE_BINARY_DIR}/src ${CMAKE_SOURCE_DIR}/src/caffe.proto
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
)
endif()
set_source_files_properties(
${CMAKE_BINARY_DIR}/src/caffe.pb.cc
${CMAKE_BINARY_DIR}/src/caffe.pb.h
PROPERTIES GENERATED TRUE)
if (NOT USE_CAFFE)
list(APPEND ddetect_SOURCES
${CMAKE_BINARY_DIR}/src/caffe.pb.cc
)
endif()

endif()

if (USE_TENSORRT)

set_source_files_properties(
Expand Down
Loading

0 comments on commit 25faa8b

Please sign in to comment.