author    | Guo, Yejun <yejun.guo@intel.com>   | 2019-10-09 22:08:11 +0800
committer | Pedro Arthur <bygrandao@gmail.com> | 2019-10-15 18:56:25 -0300
commit    | 3fd5ac7e92049b4f31026acdb53a762289f71448 (patch)
tree      | 49916c1eca3e45d745c85a10da232c0d981fb3dd
parent    | b78dc27bba2cc612643df7e9c84addc142273e71 (diff)
download  | ffmpeg-3fd5ac7e92049b4f31026acdb53a762289f71448.tar.gz
avfilter/dnn: unify the layer execution function in native mode
Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
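
What the patch does: ff_dnn_execute_model_native() no longer switches over the layer type. Each native layer now implements the single LAYER_EXEC_FUNC signature, taking its parameters as an opaque const void * and casting them back to its own params struct, and the execution loop simply indexes the new layer_funcs table (dnn_backend_native_layers.c) with the DNNLayerType value. The fragment below is a self-contained sketch of that dispatch pattern, not FFmpeg code; the operand, enum and params types and the layer bodies are simplified stand-ins invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the native backend types (not the FFmpeg definitions). */
typedef struct Operand { float value; } Operand;
typedef enum { LT_INPUT = 0, LT_SCALE = 1, LT_OFFSET = 2, LT_COUNT } LayerType;

/* One uniform signature for every layer: parameters arrive as an opaque pointer
 * and each layer casts it back to its own params struct. */
typedef int (*LayerExecFunc)(Operand *operands, const int32_t *input_indexes,
                             int32_t output_index, const void *parameters);

typedef struct { float factor; } ScaleParams;
typedef struct { float bias;   } OffsetParams;

static int exec_scale(Operand *operands, const int32_t *in, int32_t out, const void *parameters)
{
    const ScaleParams *params = parameters;
    operands[out].value = operands[in[0]].value * params->factor;
    return 0;
}

static int exec_offset(Operand *operands, const int32_t *in, int32_t out, const void *parameters)
{
    const OffsetParams *params = parameters;
    operands[out].value = operands[in[0]].value + params->bias;
    return 0;
}

/* Dispatch table indexed by the layer-type enum; the input layer has no executor,
 * mirroring the NULL entry for DLT_INPUT in layer_funcs[]. */
static const LayerExecFunc layer_exec[LT_COUNT] = {
    NULL, exec_scale, exec_offset,
};

int main(void)
{
    Operand operands[3] = { { 2.0f }, { 0.0f }, { 0.0f } };
    ScaleParams  scale  = { 3.0f };
    OffsetParams offset = { 1.0f };

    /* A tiny "model": each layer records its type, opaque params, input and output. */
    struct { LayerType type; const void *params; int32_t in[1]; int32_t out; } layers[] = {
        { LT_SCALE,  &scale,  { 0 }, 1 },
        { LT_OFFSET, &offset, { 1 }, 2 },
    };

    for (size_t i = 0; i < sizeof(layers) / sizeof(layers[0]); i++)
        layer_exec[layers[i].type](operands, layers[i].in, layers[i].out, layers[i].params);

    printf("%f\n", operands[2].value); /* (2 * 3) + 1 = 7 */
    return 0;
}

With this layout the execution loop never changes again: adding a native layer only touches the enum, the layer's own file and the table (see the sketch after the diff).
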
-rw-r--r-- | libavfilter/dnn/Makefile                               |  1
-rw-r--r-- | libavfilter/dnn/dnn_backend_native.c                   | 34
-rw-r--r-- | libavfilter/dnn/dnn_backend_native.h                   |  4
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_conv2d.c      |  4
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_conv2d.h      |  3
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_depth2space.c |  5
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_depth2space.h |  3
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_maximum.c     |  4
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_maximum.h     |  3
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_pad.c         |  5
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layer_pad.h         |  4
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layers.c            | 34
-rw-r--r-- | libavfilter/dnn/dnn_backend_native_layers.h            | 32
-rw-r--r-- | tests/dnn/dnn-layer-conv2d-test.c                      |  4
-rw-r--r-- | tests/dnn/dnn-layer-depth2space-test.c                 |  4
15 files changed, 102 insertions, 42 deletions
diff --git a/libavfilter/dnn/Makefile b/libavfilter/dnn/Makefile
index 721094ddc8..171f00e502 100644
--- a/libavfilter/dnn/Makefile
+++ b/libavfilter/dnn/Makefile
@@ -1,5 +1,6 @@
 OBJS-$(CONFIG_DNN) += dnn/dnn_interface.o
 OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native.o
+OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layers.o
 OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_pad.o
 OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_conv2d.o
 OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_depth2space.o
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index 97549d3077..c8fb956dec 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -29,6 +29,7 @@
 #include "dnn_backend_native_layer_conv2d.h"
 #include "dnn_backend_native_layer_depth2space.h"
 #include "dnn_backend_native_layer_maximum.h"
+#include "dnn_backend_native_layers.h"

 static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
 {
@@ -331,10 +332,6 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
 {
     ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
     int32_t layer;
-    ConvolutionalParams *conv_params;
-    DepthToSpaceParams *depth_to_space_params;
-    LayerPadParams *pad_params;
-    DnnLayerMaximumParams *maximum_params;
     uint32_t nb = FFMIN(nb_output, network->nb_output);

     if (network->layers_num <= 0 || network->operands_num <= 0)
@@ -343,30 +340,11 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
         return DNN_ERROR;

     for (layer = 0; layer < network->layers_num; ++layer){
-        switch (network->layers[layer].type){
-        case DLT_CONV2D:
-            conv_params = (ConvolutionalParams *)network->layers[layer].params;
-            convolve(network->operands, network->layers[layer].input_operand_indexes,
-                     network->layers[layer].output_operand_index, conv_params);
-            break;
-        case DLT_DEPTH_TO_SPACE:
-            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
-            depth_to_space(network->operands, network->layers[layer].input_operand_indexes,
-                           network->layers[layer].output_operand_index, depth_to_space_params->block_size);
-            break;
-        case DLT_MIRROR_PAD:
-            pad_params = (LayerPadParams *)network->layers[layer].params;
-            dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
-                                  network->layers[layer].output_operand_index, pad_params);
-            break;
-        case DLT_MAXIMUM:
-            maximum_params = (DnnLayerMaximumParams *)network->layers[layer].params;
-            dnn_execute_layer_maximum(network->operands, network->layers[layer].input_operand_indexes,
-                                      network->layers[layer].output_operand_index, maximum_params);
-            break;
-        case DLT_INPUT:
-            return DNN_ERROR;
-        }
+        DNNLayerType layer_type = network->layers[layer].type;
+        layer_funcs[layer_type](network->operands,
+                                network->layers[layer].input_operand_indexes,
+                                network->layers[layer].output_operand_index,
+                                network->layers[layer].params);
     }

     for (uint32_t i = 0; i < nb; ++i) {
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index 761e5ed02c..9821390194 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -33,13 +33,15 @@
 /**
  * the enum value of DNNLayerType should not be changed,
  * the same values are used in convert_from_tensorflow.py
+ * and, it is used to index the layer execution function pointer.
  */
 typedef enum {
     DLT_INPUT = 0,
     DLT_CONV2D = 1,
     DLT_DEPTH_TO_SPACE = 2,
     DLT_MIRROR_PAD = 3,
-    DLT_MAXIMUM = 4
+    DLT_MAXIMUM = 4,
+    DLT_COUNT
 } DNNLayerType;

 typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index b13b4314ec..594187f5b1 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -23,7 +23,8 @@

 #define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))

-int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
+int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
+                             int32_t output_operand_index, const void *parameters)
 {
     float *output;
     int32_t input_operand_index = input_operand_indexes[0];
@@ -32,6 +33,7 @@ int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t
     int width = operands[input_operand_index].dims[2];
     int channel = operands[input_operand_index].dims[3];
     const float *input = operands[input_operand_index].data;
+    const ConvolutionalParams *conv_params = (const ConvolutionalParams *)parameters;

     int radius = conv_params->kernel_size >> 1;
     int src_linesize = width * conv_params->input_num;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
index 7ddfff38ba..1dd84cb8f6 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -35,5 +35,6 @@ typedef struct ConvolutionalParams{
     float *biases;
 } ConvolutionalParams;

-int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params);
+int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
+                             int32_t output_operand_index, const void *parameters);
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
index a248764681..37200607b2 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
@@ -27,9 +27,12 @@
 #include "libavutil/avassert.h"
 #include "dnn_backend_native_layer_depth2space.h"

-int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size)
+int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
+                                  int32_t output_operand_index, const void *parameters)
 {
     float *output;
+    const DepthToSpaceParams *params = (const DepthToSpaceParams *)parameters;
+    int block_size = params->block_size;
     int32_t input_operand_index = input_operand_indexes[0];
     int number = operands[input_operand_index].dims[0];
     int height = operands[input_operand_index].dims[1];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
index 8708be83b9..c481bf1e5c 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
@@ -34,6 +34,7 @@ typedef struct DepthToSpaceParams{
     int block_size;
 } DepthToSpaceParams;

-int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size);
+int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
+                                  int32_t output_operand_index, const void *parameters);

 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.c b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
index a2669af794..6add170319 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
@@ -27,10 +27,12 @@
 #include "libavutil/avassert.h"
 #include "dnn_backend_native_layer_maximum.h"

-int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params)
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
+                              int32_t output_operand_index, const void *parameters)
 {
     const DnnOperand *input = &operands[input_operand_indexes[0]];
     DnnOperand *output = &operands[output_operand_index];
+    const DnnLayerMaximumParams *params = (const DnnLayerMaximumParams *)parameters;
     int dims_count;
     const float *src;
     float *dst;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.h b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
index 6396e5818c..87f3bf5a80 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
@@ -37,6 +37,7 @@ typedef struct DnnLayerMaximumParams{
     }val;
 } DnnLayerMaximumParams;

-int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params);
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
+                              int32_t output_operand_index, const void *parameters);

 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.c b/libavfilter/dnn/dnn_backend_native_layer_pad.c
index c2905a75ea..f5c572728f 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.c
@@ -48,12 +48,13 @@ static int after_get_buddy(int given, int border, LayerPadModeParam mode)
     }
 }

-int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index,
-                          const LayerPadParams *params)
+int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes,
+                          int32_t output_operand_index, const void *parameters)
 {
     int32_t before_paddings;
     int32_t after_paddings;
     float* output;
+    const LayerPadParams *params = (const LayerPadParams *)parameters;

     // suppose format is <N, H, W, C>
     int32_t input_operand_index = input_operand_indexes[0];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.h b/libavfilter/dnn/dnn_backend_native_layer_pad.h
index 7cc8213521..036ff7b86f 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.h
@@ -36,7 +36,7 @@ typedef struct LayerPadParams{
     float constant_values;
 } LayerPadParams;

-int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index,
-                          const LayerPadParams *params);
+int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes,
+                          int32_t output_operand_index, const void *parameters);

 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layers.c b/libavfilter/dnn/dnn_backend_native_layers.c
new file mode 100644
index 0000000000..17b91bb7ab
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layers.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+#include "dnn_backend_native_layers.h"
+#include "dnn_backend_native_layer_pad.h"
+#include "dnn_backend_native_layer_conv2d.h"
+#include "dnn_backend_native_layer_depth2space.h"
+#include "dnn_backend_native_layer_maximum.h"
+
+LAYER_EXEC_FUNC layer_funcs[DLT_COUNT] = {
+    NULL,
+    dnn_execute_layer_conv2d,
+    dnn_execute_layer_depth2space,
+    dnn_execute_layer_pad,
+    dnn_execute_layer_maximum,
+};
diff --git a/libavfilter/dnn/dnn_backend_native_layers.h b/libavfilter/dnn/dnn_backend_native_layers.h
new file mode 100644
index 0000000000..3276aeea92
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layers.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYERS_H
+#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYERS_H
+
+#include <stdint.h>
+#include "dnn_backend_native.h"
+
+typedef int (*LAYER_EXEC_FUNC)(DnnOperand *operands, const int32_t *input_operand_indexes,
+                               int32_t output_operand_index, const void *parameters);
+
+extern LAYER_EXEC_FUNC layer_funcs[DLT_COUNT];
+
+#endif
diff --git a/tests/dnn/dnn-layer-conv2d-test.c b/tests/dnn/dnn-layer-conv2d-test.c
index afc5391484..9d13da37c8 100644
--- a/tests/dnn/dnn-layer-conv2d-test.c
+++ b/tests/dnn/dnn-layer-conv2d-test.c
@@ -113,7 +113,7 @@ static int test_with_same_dilate(void)
     operands[1].data = NULL;

     input_indexes[0] = 0;
-    convolve(operands, input_indexes, 1, &params);
+    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params);
     output = operands[1].data;

     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -212,7 +212,7 @@ static int test_with_valid(void)
     operands[1].data = NULL;

     input_indexes[0] = 0;
-    convolve(operands, input_indexes, 1, &params);
+    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params);
     output = operands[1].data;

     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-depth2space-test.c b/tests/dnn/dnn-layer-depth2space-test.c
index 87118de795..5225ec7b7a 100644
--- a/tests/dnn/dnn-layer-depth2space-test.c
+++ b/tests/dnn/dnn-layer-depth2space-test.c
@@ -48,6 +48,7 @@ static int test(void)
     print(list(output.flatten()))
     */

+    DepthToSpaceParams params;
     DnnOperand operands[2];
     int32_t input_indexes[1];
     float input[1*5*3*4] = {
@@ -79,7 +80,8 @@ static int test(void)
     operands[1].data = NULL;

     input_indexes[0] = 0;
-    depth_to_space(operands, input_indexes, 1, 2);
+    params.block_size = 2;
+    dnn_execute_layer_depth2space(operands, input_indexes, 1, &params);
     output = operands[1].data;

     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
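
As a reference for the extension path this table creates, the sketch below walks through registering a hypothetical new native layer. DLT_SUM and dnn_execute_layer_sum() are invented for illustration and do not exist in FFmpeg; the stub macro stands in for the real per-layer files, and layer_funcs is made static const here only so the fragment compiles on its own (in the patch it is a non-const global declared extern in dnn_backend_native_layers.h).

#include <stdint.h>

/* Opaque stand-in; the real DnnOperand is defined in dnn_backend_native.h. */
typedef struct DnnOperand DnnOperand;

/* The shared execution signature introduced by this patch. */
typedef int (*LAYER_EXEC_FUNC)(DnnOperand *operands, const int32_t *input_operand_indexes,
                               int32_t output_operand_index, const void *parameters);

/* Step 1: add the new value before DLT_COUNT so the table size follows the enum. */
typedef enum {
    DLT_INPUT = 0, DLT_CONV2D = 1, DLT_DEPTH_TO_SPACE = 2,
    DLT_MIRROR_PAD = 3, DLT_MAXIMUM = 4, DLT_SUM = 5, DLT_COUNT
} DNNLayerType;

/* Stub bodies so this sketch stands alone; in FFmpeg these are the real
 * dnn_execute_layer_* implementations from the per-layer source files. */
#define STUB_LAYER(name) \
    static int name(DnnOperand *o, const int32_t *in, int32_t out, const void *p) \
    { (void)o; (void)in; (void)out; (void)p; return 0; }

STUB_LAYER(dnn_execute_layer_conv2d)
STUB_LAYER(dnn_execute_layer_depth2space)
STUB_LAYER(dnn_execute_layer_pad)
STUB_LAYER(dnn_execute_layer_maximum)
STUB_LAYER(dnn_execute_layer_sum)       /* step 2: new layer, same signature */

/* Step 3: register the new function at index DLT_SUM; DLT_INPUT keeps a NULL
 * entry because input layers are never dispatched. */
static const LAYER_EXEC_FUNC layer_funcs[DLT_COUNT] = {
    NULL,
    dnn_execute_layer_conv2d,
    dnn_execute_layer_depth2space,
    dnn_execute_layer_pad,
    dnn_execute_layer_maximum,
    dnn_execute_layer_sum,
};

int main(void)
{
    /* The execution loop stays untouched: it only indexes the table. */
    return layer_funcs[DLT_SUM] ? 0 : 1;
}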