/*
  Copyright (C) 2018 Paul Brossier <piem@aubio.org>

  This file is part of aubio.

  aubio is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  aubio is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with aubio. If not, see <http://www.gnu.org/licenses/>.

*/


#include "aubio_priv.h"
#include "fmat.h"
#include "tensor.h"
#include "conv2d.h"

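/* Padding modes follow the usual convention for 2d convolutions:
   "valid" only computes outputs where the kernel fully overlaps the input
   (no zero padding), while "same" zero-pads the input so that the output
   size is ceil(input_size / stride) along each axis. */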
typedef enum
{
  PAD_SAME = 0,   // TODO
  PAD_VALID = 1,
  //PAD_CAUSAL = 2, // TODO (1d only, for dilated convolution)
} aubio_conv2d_padding_type;

struct _aubio_conv2d_t {
  // define internals here
  uint_t n_filters;
  uint_t kernel_shape[2];     // kernel sizes
  uint_t stride_shape[2];     // stride sizes

  aubio_conv2d_padding_type padding_mode;

  // these will be set after calling get_output_shape
  aubio_tensor_t *kernel;
  fvec_t *bias;
  uint_t output_shape[3];     // shape of output
  uint_t padding_start[2];    // {top, left} padding
};

static void aubio_conv2d_debug(aubio_conv2d_t *c, aubio_tensor_t *input_tensor);

aubio_conv2d_t *new_aubio_conv2d(uint_t n_filters, uint_t *kernel_shape)
{
  aubio_conv2d_t *c = AUBIO_NEW(aubio_conv2d_t);

  // validate input parameters
  AUBIO_GOTO_FAILURE(kernel_shape != NULL);
  AUBIO_GOTO_FAILURE((sint_t)n_filters >= 1);
  AUBIO_GOTO_FAILURE((sint_t)kernel_shape[0] >= 1);
  AUBIO_GOTO_FAILURE((sint_t)kernel_shape[1] >= 1);

  // set internal variables
  c->n_filters = n_filters;
  c->kernel_shape[0] = kernel_shape[0];
  c->kernel_shape[1] = kernel_shape[1];

  // default to padding_mode="valid"
  c->padding_mode = PAD_VALID;
  // set default stride_shape to {1, 1}
  {
    uint_t default_stride[2] = {1, 1};
    aubio_conv2d_set_stride(c, default_stride);
  }

  return c;

failure:
  del_aubio_conv2d(c);
  return NULL;
}

void del_aubio_conv2d(aubio_conv2d_t *c)
{
  AUBIO_ASSERT(c);
  if (c->kernel)
    del_aubio_tensor(c->kernel);
  if (c->bias)
    del_fvec(c->bias);
  AUBIO_FREE(c);
}


uint_t aubio_conv2d_set_stride(aubio_conv2d_t *c,
    uint_t stride[2])
{
  if ((sint_t)stride[0] < 1) return AUBIO_FAIL;
  if ((sint_t)stride[1] < 1) return AUBIO_FAIL;
  c->stride_shape[0] = stride[0];
  c->stride_shape[1] = stride[1];
  return AUBIO_OK;
}

uint_t *aubio_conv2d_get_stride(aubio_conv2d_t *c)
{
  return c->stride_shape;
}

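/* Output shape and padding, per axis, as implemented below:
   - "valid": output = (input - kernel + 1) / stride (integer division),
     with no padding;
   - "same": output = ceil(input / stride), total padding
     = (output - 1) * stride + kernel - input, of which floor(total / 2)
     is applied before the first sample (padding_start).
   For example, input = 10, kernel = 3, stride = 2 gives output = 4 for
   "valid", and output = 5 with a total padding of 1 for "same". */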
uint_t aubio_conv2d_get_output_shape(aubio_conv2d_t *c,
    aubio_tensor_t *input_tensor,
    uint_t *shape)
{
  uint_t output_shape[3] = {0, 0, c->n_filters};
  uint_t padding_start[2] = {0, 0};

  // check input parameters
  AUBIO_ASSERT(input_tensor);
  AUBIO_ASSERT(shape);

  // reset output array
  shape[0] = 0;
  shape[1] = 0;
  shape[2] = 0;

  switch (c->padding_mode) {
    case PAD_SAME:
      // compute output shape
      output_shape[0] = (uint_t)CEIL(input_tensor->shape[0]
          / (smpl_t)c->stride_shape[0]);
      output_shape[1] = (uint_t)CEIL(input_tensor->shape[1]
          / (smpl_t)c->stride_shape[1]);

      uint_t padding_shape[2];  // total amount of padding
      padding_shape[0] = (output_shape[0] - 1) * c->stride_shape[0]
        + c->kernel_shape[0] - input_tensor->shape[0];
      padding_shape[1] = (output_shape[1] - 1) * c->stride_shape[1]
        + c->kernel_shape[1] - input_tensor->shape[1];

      padding_start[0] = FLOOR(padding_shape[0] / 2);
      padding_start[1] = FLOOR(padding_shape[1] / 2);

      break;
    case PAD_VALID:
      output_shape[0] = (input_tensor->shape[0] - c->kernel_shape[0] + 1)
        / c->stride_shape[0];
      output_shape[1] = (input_tensor->shape[1] - c->kernel_shape[1] + 1)
        / c->stride_shape[1];

      padding_start[0] = 0;
      padding_start[1] = 0;

      break;
    //case PAD_CAUSAL:
    //  // TODO
    //  return AUBIO_FAIL;
    default:
      return AUBIO_FAIL;
  }

  uint_t kernel_shape[4];
  kernel_shape[0] = c->kernel_shape[0];
  kernel_shape[1] = c->kernel_shape[1];
  kernel_shape[2] = input_tensor->shape[2];
  kernel_shape[3] = c->n_filters;

  if (c->kernel) del_aubio_tensor(c->kernel);
  if (c->bias) del_fvec(c->bias);

  c->kernel = new_aubio_tensor(4, kernel_shape);
  if (!c->kernel) return AUBIO_FAIL;
  c->bias = new_fvec(c->n_filters);
  if (!c->bias) return AUBIO_FAIL;

  // set internals upon success
  c->output_shape[0] = output_shape[0];
  c->output_shape[1] = output_shape[1];
  c->output_shape[2] = output_shape[2];

  c->padding_start[0] = padding_start[0];
  c->padding_start[1] = padding_start[1];

  // set output
  shape[0] = output_shape[0];
  shape[1] = output_shape[1];
  shape[2] = output_shape[2];

  aubio_conv2d_debug(c, input_tensor);

  return AUBIO_OK;
}

void aubio_conv2d_debug(aubio_conv2d_t *c, aubio_tensor_t *input_tensor)
{
  // print some info
  AUBIO_ASSERT(c);
  // (kernel_height * kernel_width * input_channels + 1 bias) per filter
  uint_t n_params = (c->kernel->shape[0] * c->kernel->shape[1]
      * c->kernel->shape[2] + 1) * c->kernel->shape[3];

  const char_t *tensor_str = aubio_tensor_get_shape_string(input_tensor);
  //AUBIO_DBG("conv2d: kernel_shape_str %s\n", kernel_shape_str);
  AUBIO_DBG("conv2d: %15s -> (%d, %d, %d)",
      tensor_str,
      c->output_shape[0], c->output_shape[1], c->output_shape[2]);
  tensor_str = aubio_tensor_get_shape_string(c->kernel);
  AUBIO_DBG(" (n_params=%d, kernel_shape=(%d, %d),"
      " weights=%s, stride (%d, %d), pad_start [%d, %d])\n",
      n_params, c->kernel_shape[0], c->kernel_shape[1],
      tensor_str,
      c->stride_shape[0], c->stride_shape[1],
      -(sint_t)c->padding_start[0], -(sint_t)c->padding_start[1]);
}

uint_t aubio_conv2d_check_output_shape(aubio_conv2d_t *c,
    aubio_tensor_t *input_tensor,
    aubio_tensor_t *activations)
{
  // fetch output_shape if it hasn't been done before
  if (c->output_shape[0] == 0 ||
      c->output_shape[1] == 0 ||
      c->output_shape[2] == 0) {
    if (aubio_conv2d_get_output_shape(c, input_tensor, c->output_shape)
        != AUBIO_OK) {
      return AUBIO_FAIL;
    }
  }

  // check we have as many filters as expected activation outputs
  if (activations->shape[2] != c->n_filters) return AUBIO_FAIL;
  if (activations->shape[2] != c->kernel->shape[3]) return AUBIO_FAIL;
  if (input_tensor->shape[2] != c->kernel->shape[2]) return AUBIO_FAIL;

  // check tensor activations has the expected sizes
  if (c->output_shape[0] != activations->shape[0]) return AUBIO_FAIL;
  if (c->output_shape[1] != activations->shape[1]) return AUBIO_FAIL;
  if (c->output_shape[2] != activations->shape[2]) return AUBIO_FAIL;
  return AUBIO_OK;
}

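/* Data layout assumed by the indexing in aubio_conv2d_do below, as implied
   by the strides it computes:
   - input_tensor->data[x] holds input row x flattened over (width, channels),
     so sample (x, y, l) is at data[x][y * shape[2] + l];
   - c->kernel->data[a] holds kernel row a flattened over (width, channels,
     filters), so weight (a, b, l, i) is at
     data[a][b * k_stride2 + l * k_stride1 + i];
   - activations->data[k] holds output row k flattened over (width, filters). */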
void aubio_conv2d_do(aubio_conv2d_t *c, aubio_tensor_t *input_tensor,
    aubio_tensor_t *activations)
{
  uint_t i, j, k, l, a, b;
  uint_t stride_a, stride_b;
  sint_t x, y;
  smpl_t s, w, bias, acc;
  uint_t jj, ll, bb, yy;

  uint_t k_stride1 = c->kernel->shape[3];
  uint_t k_stride2 = c->kernel->shape[2] * k_stride1;

  AUBIO_ASSERT(c && input_tensor && activations);
  // check we have the correct output activation sizes
  if (aubio_conv2d_check_output_shape(c, input_tensor, activations))
  {
    AUBIO_ERR("conv2d: check_output_shape failed\n");
    return;
  }

  // for each kernel filter i
  for (i = 0; i < activations->shape[2]; i++) {
    // get bias
    bias = c->bias->data[i];
    stride_b = 0; // == j * c->stride_shape[1]
    jj = 0;       // == j * activations->shape[2]
    // for each output y
    for (j = 0; j < activations->shape[1]; j++) {
      // for each output x
      stride_a = 0; // == k * c->stride_shape[0]
      for (k = 0; k < activations->shape[0]; k++) {
        // reset output
        acc = 0;
        // compute convolution for one kernel
        for (a = 0; a < c->kernel_shape[0]; a++) {
          x = stride_a + a - c->padding_start[0];
          if ((x < 0) || (x > (sint_t)input_tensor->shape[0] - 1))
            continue; // padding with 0.
          bb = 0; // == b * k_stride2
          for (b = 0; b < c->kernel_shape[1]; b++) {
            y = stride_b + b - c->padding_start[1];
            if ((y < 0) || (y > (sint_t)input_tensor->shape[1] - 1))
              continue; // padding with 0.
            yy = y * input_tensor->shape[2];
            ll = bb + i; // == b * k_stride2 + l * k_stride1 + i, with l = 0
            // for each input channel
            for (l = 0; l < input_tensor->shape[2]; l++) {
              // get kernel weight
              w = c->kernel->data[a][ll];
              // get input sample
              s = input_tensor->data[x][yy + l];
              acc += w * s;
              ll += k_stride1;
            }
            bb += k_stride2;
          }
        }
        stride_a += c->stride_shape[0];
        // apply bias
        acc += bias;
        // set output activation
        activations->data[k][jj + i] = acc;
      }
      stride_b += c->stride_shape[1];
      jj += activations->shape[2];
    }
  }
}

void aubio_conv2d_do_backwards(aubio_conv2d_t *c,
    /*aubio_tensor_t *old_gradients,*/
    aubio_tensor_t *gradients)
{
  uint_t i, j, k, a, b;
  AUBIO_ASSERT(c && gradients);
  // TODO
  // for each kernel filter k
  for (i = 0; i < c->n_filters; i++) {
    // for each input column
    for (j = 0; j < gradients->shape[1]; j++) {
      // for each input row
      for (k = 0; k < gradients->shape[2]; k++) {
        for (a = 0; a < c->kernel_shape[0]; a++) {
          for (b = 0; b < c->kernel_shape[1]; b++) {
#if 0
            smpl_t grad = gradients->data[i]->data[a][b];
            smpl_t oldgrad = old_gradients->data[i]->data[a][b];
            smpl_t m = (grad - oldgrad * momentum);
            w -= lr * m - lr * decay * w;
#endif
          }
        }
      }
    }
  }
}

uint_t aubio_conv2d_set_padding_mode(aubio_conv2d_t *c,
    const char_t *padding_mode)
{
  AUBIO_ASSERT(c && padding_mode);
  if (strncmp(padding_mode, "same", PATH_MAX) == 0) {
    c->padding_mode = PAD_SAME;
  } else if (strncmp(padding_mode, "valid", PATH_MAX) == 0) {
    c->padding_mode = PAD_VALID;
  } else {
    return AUBIO_FAIL;
  }
  return AUBIO_OK;
}

uint_t aubio_conv2d_set_kernel(aubio_conv2d_t *c, aubio_tensor_t *kernel)
{
  uint_t i, j, inner_size;
  AUBIO_ASSERT(c && c->kernel && kernel);
  // check the incoming weights have the expected shape
  for (i = 0; i < c->kernel->ndim; i++) {
    AUBIO_ASSERT(c->kernel->shape[i] == kernel->shape[i]);
  }
  // copy the weights into the internal tensor, using the same row-major
  // layout as aubio_conv2d_do above
  inner_size = c->kernel->shape[1] * c->kernel->shape[2] * c->kernel->shape[3];
  for (i = 0; i < c->kernel->shape[0]; i++) {
    for (j = 0; j < inner_size; j++) {
      c->kernel->data[i][j] = kernel->data[i][j];
    }
  }
  return AUBIO_OK;
}

aubio_tensor_t *aubio_conv2d_get_kernel(aubio_conv2d_t* c)
{
  AUBIO_ASSERT(c && c->kernel);
  return c->kernel;
}

uint_t aubio_conv2d_set_bias(aubio_conv2d_t *c, fvec_t *bias)
{
  AUBIO_ASSERT(c && c->bias && bias);
  // one bias value per filter: c->bias was created with length n_filters
  AUBIO_ASSERT(c->n_filters == bias->length);
  // copy the incoming values into the internal bias vector
  fvec_copy(bias, c->bias);
  return AUBIO_OK;
}

fvec_t *aubio_conv2d_get_bias(aubio_conv2d_t* c)
{
  AUBIO_ASSERT(c && c->bias);
  return c->bias;
}
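
/* Minimal usage sketch. The sizes and variable names below are only an
   example; the layer calls are the ones defined in this file, and the tensor
   calls come from tensor.h:

     uint_t kernel_shape[2] = {3, 3};
     uint_t input_shape[3] = {5, 5, 2};
     uint_t output_shape[3];
     aubio_tensor_t *input = new_aubio_tensor(3, input_shape);
     aubio_conv2d_t *conv = new_aubio_conv2d(4, kernel_shape);
     aubio_conv2d_set_padding_mode(conv, "valid");
     if (aubio_conv2d_get_output_shape(conv, input, output_shape) == AUBIO_OK) {
       aubio_tensor_t *activations = new_aubio_tensor(3, output_shape);
       // ... fill the weights and bias, e.g. with aubio_conv2d_set_kernel()
       // and aubio_conv2d_set_bias(), then run the convolution
       aubio_conv2d_do(conv, input, activations);
       del_aubio_tensor(activations);
     }
     del_aubio_conv2d(conv);
     del_aubio_tensor(input);
*/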