/*
  Copyright (C) 2018 Paul Brossier <piem@aubio.org>

  This file is part of aubio.

  aubio is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  aubio is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with aubio.  If not, see <http://www.gnu.org/licenses/>.

*/
---|
20 | |
---|
21 | |
---|
22 | #include "aubio_priv.h" |
---|
23 | #include "fmat.h" |
---|
24 | #include "tensor.h" |
---|
25 | #include "conv2d.h" |
---|
26 | |
---|
// padding modes, selectable with aubio_conv2d_set_padding_mode()
typedef enum
{
  PAD_SAME = 0,   // same, aka half mode: zero-pad so output = ceil(input / stride)
  PAD_VALID = 1   // valid, aka no padding: kernel stays entirely inside the input
} aubio_conv2d_padding_t;
---|
32 | |
---|
// internal state of a 2-d convolution layer
struct _aubio_conv2d_t {
  // define internals here
  uint_t n_filters;       // number of convolution filters (output channels)
  uint_t kernel_shape[2]; // kernel sizes
  uint_t stride_shape[2]; // stride sizes

  aubio_conv2d_padding_t padding_mode; // PAD_SAME or PAD_VALID

  // these will be set after calling get_output_shape
  aubio_tensor_t *kernel; // weights, shape {kernel_h, kernel_w, in_channels, n_filters}
  fvec_t *bias;           // one bias per filter, length n_filters
  uint_t output_shape[3]; // shape of output
  uint_t padding_start[2]; // {top, left} padding

#if defined(HAVE_BLAS)
  // im2col scratch buffer, one row per output position
  aubio_tensor_t *padded_input;
#endif
};
---|
51 | |
---|
52 | #if defined(DEBUG) |
---|
53 | static void aubio_conv2d_debug(aubio_conv2d_t *c, aubio_tensor_t *input_tensor); |
---|
54 | #endif |
---|
55 | |
---|
56 | aubio_conv2d_t *new_aubio_conv2d(uint_t n_filters, uint_t kernel_shape[2]) |
---|
57 | { |
---|
58 | aubio_conv2d_t *c = AUBIO_NEW(aubio_conv2d_t); |
---|
59 | |
---|
60 | // validate input parameters |
---|
61 | AUBIO_GOTO_FAILURE((sint_t)n_filters >= 1); |
---|
62 | AUBIO_GOTO_FAILURE((sint_t)kernel_shape[0] >= 1); |
---|
63 | AUBIO_GOTO_FAILURE((sint_t)kernel_shape[1] >= 1); |
---|
64 | |
---|
65 | // set internal variables |
---|
66 | c->n_filters = n_filters; |
---|
67 | c->kernel_shape[0] = kernel_shape[0]; |
---|
68 | c->kernel_shape[1] = kernel_shape[1]; |
---|
69 | |
---|
70 | // default to padding_mode="valid" |
---|
71 | c->padding_mode = PAD_VALID; |
---|
72 | // set default stride_shape to {1, 1} |
---|
73 | { |
---|
74 | uint_t default_stride[2] = {1, 1}; |
---|
75 | aubio_conv2d_set_stride(c, default_stride); |
---|
76 | } |
---|
77 | |
---|
78 | return c; |
---|
79 | |
---|
80 | failure: |
---|
81 | del_aubio_conv2d(c); |
---|
82 | return NULL; |
---|
83 | } |
---|
84 | |
---|
85 | void del_aubio_conv2d(aubio_conv2d_t *c) |
---|
86 | { |
---|
87 | AUBIO_ASSERT(c); |
---|
88 | if (c->kernel) |
---|
89 | del_aubio_tensor(c->kernel); |
---|
90 | if (c->bias) |
---|
91 | del_fvec(c->bias); |
---|
92 | #if defined(HAVE_BLAS) |
---|
93 | if (c->padded_input) |
---|
94 | del_aubio_tensor(c->padded_input); |
---|
95 | #endif |
---|
96 | AUBIO_FREE(c); |
---|
97 | } |
---|
98 | |
---|
99 | |
---|
100 | uint_t aubio_conv2d_set_stride(aubio_conv2d_t *c, |
---|
101 | uint_t stride[2]) |
---|
102 | { |
---|
103 | if ((sint_t)stride[0] < 1) return AUBIO_FAIL; |
---|
104 | if ((sint_t)stride[1] < 1) return AUBIO_FAIL; |
---|
105 | c->stride_shape[0] = stride[0]; |
---|
106 | c->stride_shape[1] = stride[1]; |
---|
107 | return AUBIO_OK; |
---|
108 | } |
---|
109 | |
---|
110 | uint_t *aubio_conv2d_get_stride(aubio_conv2d_t *c) |
---|
111 | { |
---|
112 | return c->stride_shape; |
---|
113 | } |
---|
114 | |
---|
115 | uint_t aubio_conv2d_get_output_shape(aubio_conv2d_t *c, |
---|
116 | aubio_tensor_t *input_tensor, |
---|
117 | uint_t *shape) |
---|
118 | { |
---|
119 | uint_t output_shape[3] = {0, 0, c->n_filters}; |
---|
120 | uint_t padding_start[2] = {0, 0}; |
---|
121 | // total amount of padding |
---|
122 | uint_t padding_shape[2] = {0, 0}; |
---|
123 | |
---|
124 | // check input parameters |
---|
125 | AUBIO_ASSERT(input_tensor); |
---|
126 | AUBIO_ASSERT(shape); |
---|
127 | |
---|
128 | // reset output array |
---|
129 | shape[0] = 0; |
---|
130 | shape[1] = 0; |
---|
131 | shape[2] = 0; |
---|
132 | |
---|
133 | switch (c->padding_mode) { |
---|
134 | case PAD_SAME: |
---|
135 | // compute output shape |
---|
136 | output_shape[0] = (uint_t)CEIL(input_tensor->shape[0] |
---|
137 | / (smpl_t)c->stride_shape[0]); |
---|
138 | output_shape[1] = (uint_t)CEIL(input_tensor->shape[1] |
---|
139 | / (smpl_t)c->stride_shape[1]); |
---|
140 | |
---|
141 | padding_shape[0] = (output_shape[0] - 1) * c->stride_shape[0] |
---|
142 | + c->kernel_shape[0] - input_tensor->shape[0]; |
---|
143 | padding_shape[1] = (output_shape[1] - 1) * c->stride_shape[1] |
---|
144 | + c->kernel_shape[1] - input_tensor->shape[1]; |
---|
145 | |
---|
146 | padding_start[0] = FLOOR(padding_shape[0] / 2); |
---|
147 | padding_start[1] = FLOOR(padding_shape[1] / 2); |
---|
148 | |
---|
149 | break; |
---|
150 | case PAD_VALID: |
---|
151 | output_shape[0] = (input_tensor->shape[0] - c->kernel_shape[0] + 1) |
---|
152 | / c->stride_shape[0]; |
---|
153 | output_shape[1] = (input_tensor->shape[1] - c->kernel_shape[1] + 1) |
---|
154 | / c->stride_shape[1]; |
---|
155 | |
---|
156 | padding_start[0] = 0; |
---|
157 | padding_start[1] = 0; |
---|
158 | |
---|
159 | break; |
---|
160 | //case PAD_CAUSAL: |
---|
161 | // // TODO |
---|
162 | // return AUBIO_FAIL; |
---|
163 | default: |
---|
164 | return AUBIO_FAIL; |
---|
165 | } |
---|
166 | |
---|
167 | uint_t kernel_shape[4]; |
---|
168 | kernel_shape[0] = c->kernel_shape[0]; |
---|
169 | kernel_shape[1] = c->kernel_shape[1]; |
---|
170 | kernel_shape[2] = input_tensor->shape[2]; |
---|
171 | kernel_shape[3] = c->n_filters; |
---|
172 | |
---|
173 | if (c->kernel) del_aubio_tensor(c->kernel); |
---|
174 | if (c->bias) del_fvec(c->bias); |
---|
175 | |
---|
176 | c->kernel = new_aubio_tensor(4, kernel_shape); |
---|
177 | if (!c->kernel) return AUBIO_FAIL; |
---|
178 | c->bias = new_fvec(c->n_filters); |
---|
179 | |
---|
180 | // set internals upon success |
---|
181 | c->output_shape[0] = output_shape[0]; |
---|
182 | c->output_shape[1] = output_shape[1]; |
---|
183 | c->output_shape[2] = output_shape[2]; |
---|
184 | |
---|
185 | c->padding_start[0] = padding_start[0]; |
---|
186 | c->padding_start[1] = padding_start[1]; |
---|
187 | |
---|
188 | // set output |
---|
189 | shape[0] = output_shape[0]; |
---|
190 | shape[1] = output_shape[1]; |
---|
191 | shape[2] = output_shape[2]; |
---|
192 | |
---|
193 | |
---|
194 | #if defined(HAVE_BLAS) |
---|
195 | // im2col padding |
---|
196 | padding_shape[0] = output_shape[0] * output_shape[1]; |
---|
197 | padding_shape[1] = c->kernel_shape[0] * c->kernel_shape[1] |
---|
198 | * input_tensor->shape[2]; |
---|
199 | c->padded_input = new_aubio_tensor(2, padding_shape); |
---|
200 | if (!c-> padded_input) { |
---|
201 | AUBIO_MSG("conv2d: failed creating padded_input with shape (%d, %d, %d)\n", |
---|
202 | padding_shape); |
---|
203 | return AUBIO_FAIL; |
---|
204 | } |
---|
205 | #endif |
---|
206 | |
---|
207 | #if defined(DEBUG) |
---|
208 | aubio_conv2d_debug(c, input_tensor); |
---|
209 | #endif |
---|
210 | |
---|
211 | return AUBIO_OK; |
---|
212 | } |
---|
213 | |
---|
214 | #if defined(DEBUG) |
---|
/* print input/output shapes and layer parameters (DEBUG builds only) */
void aubio_conv2d_debug(aubio_conv2d_t *c, aubio_tensor_t *input_tensor)
{
  // print some info
  AUBIO_ASSERT(c);
  // trainable parameter count
  // NOTE(review): this computes (kh * channels + 1) * kw * n_filters, which
  // counts kw * n_filters bias terms; the conventional count is
  // (kh * kw * channels + 1) * n_filters — confirm the intended formula
  uint_t n_params = (c->kernel->shape[0] * c->kernel->shape[2] + 1)
    * c->kernel->shape[1] * c->kernel->shape[3];

  const char_t *tensor_str = aubio_tensor_get_shape_string(input_tensor);
  //AUBIO_DBG("conv2d: kernel_shape_str %s\n", kernel_shape_str);
  AUBIO_DBG("conv2d: %15s -> (%d, %d, %d)",
      tensor_str,
      c->output_shape[0], c->output_shape[1], c->output_shape[2]);
  tensor_str = aubio_tensor_get_shape_string(c->kernel);
  // NOTE(review): "weigths" is a typo in the output string; also, negating
  // the unsigned padding_start values and printing them with %d relies on
  // wrap-around plus signed reinterpretation — works in practice for small
  // values but worth cleaning up
  AUBIO_DBG(" (n_params=%d, kernel_shape=(%d, %d),"
      " weigths=%s, stride (%d, %d), pad_start [%d, %d])\n",
      n_params, c->kernel_shape[0], c->kernel_shape[1],
      tensor_str,
      c->stride_shape[0], c->stride_shape[1],
      -c->padding_start[0], -c->padding_start[1]);
}
---|
235 | #endif |
---|
236 | |
---|
237 | uint_t aubio_conv2d_check_output_shape(aubio_conv2d_t *c, |
---|
238 | aubio_tensor_t *input_tensor, |
---|
239 | aubio_tensor_t *activations) |
---|
240 | { |
---|
241 | // fetch output_shape if it hasn't been done before |
---|
242 | if (c->output_shape[0] == 0 || |
---|
243 | c->output_shape[1] == 0 || |
---|
244 | c->output_shape[2] == 0) { |
---|
245 | if (!aubio_conv2d_get_output_shape(c, input_tensor, c->output_shape)) { |
---|
246 | return AUBIO_FAIL; |
---|
247 | } |
---|
248 | } |
---|
249 | |
---|
250 | // check we have as many filters as expected activation outputs |
---|
251 | if (activations->shape[2] != c->n_filters) return AUBIO_FAIL; |
---|
252 | if (activations->shape[2] != c->kernel->shape[3]) return AUBIO_FAIL; |
---|
253 | if (input_tensor->shape[2] != c->kernel->shape[2]) return AUBIO_FAIL; |
---|
254 | |
---|
255 | // check tensor activations has the expected sizes |
---|
256 | if (c->output_shape[0] != activations->shape[0]) return AUBIO_FAIL; |
---|
257 | if (c->output_shape[1] != activations->shape[1]) return AUBIO_FAIL; |
---|
258 | if (c->output_shape[2] != activations->shape[2]) return AUBIO_FAIL; |
---|
259 | return AUBIO_OK; |
---|
260 | } |
---|
261 | |
---|
262 | #if !defined(HAVE_BLAS) |
---|
263 | void aubio_conv2d_do(aubio_conv2d_t *c, aubio_tensor_t *input_tensor, |
---|
264 | aubio_tensor_t *activations) |
---|
265 | { |
---|
266 | uint_t i, j, k, l, a, b; |
---|
267 | uint_t stride_a, stride_b; |
---|
268 | sint_t x, y; |
---|
269 | smpl_t s, w, bias, acc; |
---|
270 | uint_t jj, ll, bb, yy; |
---|
271 | |
---|
272 | uint_t k_stride1 = c->kernel->shape[3]; |
---|
273 | uint_t k_stride2 = c->kernel->shape[2] * k_stride1; |
---|
274 | |
---|
275 | AUBIO_ASSERT(c && input_tensor && activations); |
---|
276 | // check we have the correct output activation sizes |
---|
277 | if (aubio_conv2d_check_output_shape(c, input_tensor, activations)) |
---|
278 | { |
---|
279 | AUBIO_ERR("conv2d: check_output_shape failed\n"); |
---|
280 | return; |
---|
281 | } |
---|
282 | |
---|
283 | // for each kernel filter k |
---|
284 | for (i = 0; i < activations->shape[2]; i++) { |
---|
285 | // get bias |
---|
286 | bias = c->bias->data[i]; |
---|
287 | stride_b = 0; // == j * c->stride_shape[1] |
---|
288 | jj = 0; // == j * activations->shape[2] |
---|
289 | // for each output y |
---|
290 | for (j = 0; j < activations->shape[1]; j++) { |
---|
291 | // for each output x |
---|
292 | stride_a = 0; // k * c->stride_shape[0] |
---|
293 | for (k = 0; k < activations->shape[0]; k++) { |
---|
294 | // reset output |
---|
295 | acc = 0; |
---|
296 | // compute convolution for one kernel |
---|
297 | for (a = 0; a < c->kernel_shape[0]; a++) { |
---|
298 | x = stride_a + a - c->padding_start[0]; |
---|
299 | if ((x < 0) || (x > (sint_t)input_tensor->shape[0] - 1)) |
---|
300 | continue; // padding with 0. |
---|
301 | bb = 0; // == b * k_stride2 |
---|
302 | for (b = 0; b < c->kernel_shape[1]; b++) { |
---|
303 | y = stride_b + b - c->padding_start[1]; |
---|
304 | if ((y < 0) || (y > (sint_t)input_tensor->shape[1] - 1)) |
---|
305 | continue; // padding with 0. |
---|
306 | yy = y * input_tensor->shape[2]; |
---|
307 | ll = bb + i; // + l * k_stride1 |
---|
308 | // for each input channel |
---|
309 | for (l = 0; l < input_tensor->shape[2]; l++) { |
---|
310 | // get kernel weight |
---|
311 | w = c->kernel->data[a][ll]; |
---|
312 | // get input sample |
---|
313 | s = input_tensor->data[x][yy + l]; |
---|
314 | acc += w * s; |
---|
315 | ll += k_stride1; |
---|
316 | } |
---|
317 | bb += k_stride2; |
---|
318 | } |
---|
319 | } |
---|
320 | stride_a += c->stride_shape[0]; |
---|
321 | // apply bias |
---|
322 | acc += bias; |
---|
323 | // set output activation |
---|
324 | activations->data[k][jj + i] = acc; |
---|
325 | } |
---|
326 | stride_b += c->stride_shape[1]; |
---|
327 | jj += activations->shape[2]; |
---|
328 | } |
---|
329 | } |
---|
330 | } |
---|
331 | |
---|
332 | #else /* HAVE_BLAS */ |
---|
333 | |
---|
/* expand the input into an im2col matrix: each row of padded_input holds the
 * full receptive field (kernel_h x kernel_w x channels) of one output
 * position, so the convolution reduces to one matrix multiplication against
 * the flattened kernel.
 *
 * positions that fall in the zero-padded region are skipped, leaving the
 * corresponding entries of padded_input at the value they held before the
 * call (zero after allocation; the same entries are skipped on every call
 * for a given configuration).
 */
void aubio_conv2d_copy_to_padded(aubio_conv2d_t *o,
    aubio_tensor_t *input_tensor, aubio_tensor_t *padded_input)
{
  // naive implementation of im2col
  uint_t i, j, k, l, m;
  // strides into a flattened padded_input row, innermost (m) to outermost (i)
  uint_t stride_4 = o->kernel->shape[2];
  uint_t stride_3 = o->kernel->shape[1] * stride_4;
  uint_t stride_2 = o->kernel->shape[0] * stride_3;
  uint_t stride_1 = o->output_shape[1] * stride_2;
  // strides into the flattened input tensor
  uint_t stride_in_2 = input_tensor->shape[2];
  uint_t stride_in_1 = input_tensor->shape[1] * stride_in_2;

  // padded_input must hold one receptive field per output position
  AUBIO_ASSERT(padded_input->size ==
      o->output_shape[0] * o->output_shape[1]
      * o->kernel_shape[0] * o->kernel_shape[1]
      * input_tensor->shape[2]);
  AUBIO_ASSERT(input_tensor->shape[2] == o->kernel->shape[2]);

  // i, j: output position; k, l: kernel position; m: input channel
  for (i = 0; i < o->output_shape[0]; i++)
  {
    for (j = 0; j < o->output_shape[1]; j++)
    {
      for (k = 0; k < o->kernel->shape[0]; k++)
      {
        for (l = 0; l < o->kernel->shape[1]; l++)
        {
          for (m = 0; m < o->kernel->shape[2]; m++)
          {
            // position in the (virtually padded) input
            uint_t read_i = i * o->stride_shape[0] + k;
            uint_t read_j = j * o->stride_shape[1] + l;
            // skip positions that fall in the zero-padded border
            if (read_i < o->padding_start[0])
              continue;
            else if (read_i - o->padding_start[0] >= input_tensor->shape[0])
              continue;
            if (read_j < o->padding_start[1])
              continue;
            else if (read_j - o->padding_start[1] >= input_tensor->shape[1])
              continue;

            // flat index of the actual input sample
            sint_t idx =
              ((read_i - o->padding_start[0])) * stride_in_1
              + ((read_j - o->padding_start[1])) * stride_in_2
              + m;
            padded_input->buffer[i * stride_1
              + j * stride_2
              + k * stride_3
              + l * stride_4
              + m]
              = input_tensor->buffer[idx];
          }
        }
      }
    }
  }
}
---|
389 | |
---|
/* forward pass (BLAS version): im2col expansion followed by a single
 * matrix multiplication, then a per-filter bias added to each output
 * position.
 *
 * o->padded_input must have been allocated by
 * aubio_conv2d_get_output_shape() before calling this.
 */
void aubio_conv2d_do(aubio_conv2d_t *o, aubio_tensor_t *input_tensor,
    aubio_tensor_t *activations)
{
  uint_t i, j;
  smpl_t bias;
  aubio_tensor_t *padded_input = o->padded_input;
  aubio_tensor_t *kernel = o->kernel;

  AUBIO_ASSERT(o && input_tensor && activations);
  // check we have the correct output activation sizes
  if (aubio_conv2d_check_output_shape(o, input_tensor, activations))
  {
    AUBIO_ERR("conv2d: check_output_shape failed\n");
    return;
  }

  // dimensions of the product: (M x K) . (K x N) -> (M x N), where M is
  // the number of output positions, K the receptive field size, and N the
  // number of filters
  uint_t M = padded_input->shape[0];
  uint_t K = padded_input->size/padded_input->shape[0];
  uint_t N = kernel->size / K;

  // check sizes
  AUBIO_ASSERT(M * K == padded_input->size);
  AUBIO_ASSERT(N * K == kernel->size);
  AUBIO_ASSERT(M * N == activations->size);

  // copy input to im2col sliding window version
  aubio_conv2d_copy_to_padded(o, input_tensor, padded_input);

  // activations = 1.0 * padded_input . kernel + 0.0 * activations
  aubio_cblas__gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
      M, // M
      N, // N
      K, // K
      1.F, // alpha
      padded_input->buffer, // M x K matrix
      K, // K (2nd dim of A)
      kernel->buffer, // K x N matrix
      N, // N
      0.F, // beta
      activations->buffer, // M x N matrix
      N); // N (2nd dim of C)


  // apply bias: one value per filter, added at every output position
  for (i = 0; i < activations->shape[2]; i++) {
    bias = o->bias->data[i];
    for (j = 0; j < activations->shape[0] * activations->shape[1]; j++)
    {
      activations->buffer[j * activations->shape[2] + i] += bias;
    }
  }
}
---|
441 | #endif |
---|
442 | |
---|
/* backward pass: not implemented yet
 *
 * NOTE(review): this is a stub. The loops below walk the expected
 * dimensions but their body is compiled out (#if 0), so calling this
 * function currently has no effect. The disabled code sketches a momentum
 * plus weight-decay update rule referencing variables (old_gradients, w,
 * lr, momentum, decay) that are not declared anywhere yet.
 */
void aubio_conv2d_do_backwards(aubio_conv2d_t *c,
    /*aubio_tensor_t *old_gradients,*/
    aubio_tensor_t *gradients)
{
  uint_t i, j, k, a, b;
  AUBIO_ASSERT(c && gradients);
  // TODO
  // for each kernel filter k
  for (i = 0; i < c->n_filters; i++) {
    // for each input column
    for (j = 0; j < gradients->shape[1]; j++) {
      // for each input row
      for (k = 0; k < gradients->shape[2]; k++) {
        for (a = 0; a < c->kernel_shape[0]; a++) {
          for (b = 0; b < c->kernel_shape[1]; b++) {
#if 0
            smpl_t grad = gradients->data[i]->data[a][b];
            smpl_t oldgrad = old_gradients->data[i]->data[a][b];
            smpl_t m = (grad - oldgrad * momentum);
            w -= lr * m - lr * decay * w;
#endif
          }
        }
      }
    }
  }
}
---|
470 | |
---|
471 | uint_t aubio_conv2d_set_padding_mode(aubio_conv2d_t *c, |
---|
472 | const char_t *padding_mode) |
---|
473 | { |
---|
474 | AUBIO_ASSERT(c && padding_mode); |
---|
475 | if (strncasecmp(padding_mode, "same", PATH_MAX) == 0) { |
---|
476 | c->padding_mode = PAD_SAME; |
---|
477 | } else if (strncasecmp(padding_mode, "valid", PATH_MAX) == 0) { |
---|
478 | c->padding_mode = PAD_VALID; |
---|
479 | } else { |
---|
480 | return AUBIO_FAIL; |
---|
481 | } |
---|
482 | return AUBIO_OK; |
---|
483 | } |
---|
484 | |
---|
485 | uint_t aubio_conv2d_set_kernel(aubio_conv2d_t *c, aubio_tensor_t *kernel) |
---|
486 | { |
---|
487 | AUBIO_ASSERT(c && kernel); |
---|
488 | if (aubio_tensor_have_same_shape(kernel, c->kernel)) { |
---|
489 | aubio_tensor_copy(kernel, c->kernel); |
---|
490 | return AUBIO_OK; |
---|
491 | } |
---|
492 | return AUBIO_FAIL; |
---|
493 | } |
---|
494 | |
---|
495 | aubio_tensor_t *aubio_conv2d_get_kernel(aubio_conv2d_t* c) |
---|
496 | { |
---|
497 | AUBIO_ASSERT(c && c->kernel); |
---|
498 | return c->kernel; |
---|
499 | } |
---|
500 | |
---|
501 | uint_t aubio_conv2d_set_bias(aubio_conv2d_t *c, fvec_t *bias) |
---|
502 | { |
---|
503 | AUBIO_ASSERT(c && bias); |
---|
504 | if (bias->length == c->bias->length) { |
---|
505 | fvec_copy(bias, c->bias); |
---|
506 | return AUBIO_OK; |
---|
507 | } |
---|
508 | return AUBIO_OK; |
---|
509 | } |
---|
510 | |
---|
511 | fvec_t *aubio_conv2d_get_bias(aubio_conv2d_t* c) |
---|
512 | { |
---|
513 | AUBIO_ASSERT(c && c->bias); |
---|
514 | return c->bias; |
---|
515 | } |
---|