Vector Optimized Library of Kernels 3.2.0
Architecture-tuned implementations of math kernels
volk_32fc_x2_conjugate_dot_prod_32fc.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2014 Free Software Foundation, Inc.
 *
 * This file is part of VOLK
 *
 * SPDX-License-Identifier: LGPL-3.0-or-later
 */

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H


#include <volk/volk_complex.h>


#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    lv_32fc_t res = lv_cmake(0.f, 0.f);
    for (unsigned int i = 0; i < num_points; ++i) {
        res += (*input++) * lv_conj((*taps++));
    }
    *result = res;
}

#endif /*LV_HAVE_GENERIC*/
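
/* Usage sketch (added for illustration, not part of the original header):
 * the kernel computes result = sum_k input[k] * conj(taps[k]). The buffers
 * and values below are arbitrary example data; application code would
 * normally call the dispatcher volk_32fc_x2_conjugate_dot_prod_32fc()
 * rather than a specific implementation.
 *
 * \code
 * lv_32fc_t x[4] = { lv_cmake(1.f, 2.f), lv_cmake(3.f, 4.f),
 *                    lv_cmake(5.f, 6.f), lv_cmake(7.f, 8.f) };
 * lv_32fc_t h[4] = { lv_cmake(1.f, 0.f), lv_cmake(0.f, 1.f),
 *                    lv_cmake(1.f, 1.f), lv_cmake(2.f, 0.f) };
 * lv_32fc_t acc;
 * volk_32fc_x2_conjugate_dot_prod_32fc_generic(&acc, x, h, 4);
 * // acc == (1+2j)*conj(1) + (3+4j)*conj(j) + (5+6j)*conj(1+j) + (7+8j)*conj(2)
 * //     == (1+2j) + (4-3j) + (11+1j) + (14+16j) == 30 + 16j
 * \endcode
 */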

#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_block(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    // Two complex samples are processed per iteration; each sum holds the
    // { real, imag } parts of a partial in * conj(tp) accumulation.
    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // Handle the last element if num_points is odd.
    if (num_bytes >> 3 & 1) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/

#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    // Partial sums for indices i, i+1, i+2 and i+3.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Four complex elements at a time are processed.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         *      ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m256 a = _mm256_loadu_ps((const float*)&input[i]);
        __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // | ar⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains four such partial sums.
    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    /* Sum the four partial sums: Add high half of vector sum to the low one, i.e.
     * s1 + s3 and s0 + s2 …
     */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // … and now (s0 + s2) + (s1 + s3)
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Handle the last elements if num_points mod 4 is bigger than 0.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_cmake(lv_creal(input[i]) * lv_creal(taps[i]) +
                                lv_cimag(input[i]) * lv_cimag(taps[i]),
                            lv_cimag(input[i]) * lv_creal(taps[i]) -
                                lv_creal(input[i]) * lv_cimag(taps[i]));
    }
}

#endif /* LV_HAVE_AVX */
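
/* Scalar sketch (added for illustration): the moveldup/movehdup + addsub
 * pattern above produces, per complex element, exactly the two lane values
 * that the final swap-and-add combines into the real and imaginary parts of
 * a⋅conj(b). A plain-C equivalent for one element, with arbitrary example
 * values, might look like this:
 *
 * \code
 * lv_32fc_t a = lv_cmake(1.f, 2.f); // example input sample
 * lv_32fc_t b = lv_cmake(3.f, 4.f); // example tap
 * float ar = lv_creal(a), ai = lv_cimag(a);
 * float br = lv_creal(b), bi = lv_cimag(b);
 * float real = ar * br + ai * bi; // even lane plus swapped odd lane
 * float imag = ai * br - ar * bi; // odd lane plus swapped even lane
 * lv_32fc_t prod = lv_cmake(real, imag); // == a * conj(b) == 11 + 2j
 * \endcode
 */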

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    // Partial sums for indices i and i+1.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Two complex elements at a time are processed.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         *      ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m128 a = _mm_loadu_ps((const float*)&input[i]);
        __m128 b = _mm_loadu_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // | ar⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains two such partial sums.
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    // Sum the two partial sums.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    _mm_storel_pi((__m64*)result, sum);

    // Handle the last element if num_points mod 2 is 1.
    if (num_points & 1u) {
        *result += lv_cmake(
            lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
                lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
            lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
                lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
    }
}

#endif /*LV_HAVE_SSE3*/

#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t* result,
                                                             const lv_32fc_t* input,
                                                             const lv_32fc_t* taps,
                                                             unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply accumulate/subtract to get result
        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
    }
    *result = lv_conj(*result);
}
#endif /*LV_HAVE_NEON*/
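
/* Note (added): since a_ptr points at taps and b_ptr at input, the loop and
 * tail above accumulate sum_k taps[k] * conj(input[k]). The final lv_conj()
 * maps this to the desired result via conj(x * conj(y)) == y * conj(x), i.e.
 * conj(sum_k taps[k] * conj(input[k])) == sum_k input[k] * conj(taps[k]).
 */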

#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H

#include <stdio.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>


#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    // Partial sums for indices i, i+1, i+2 and i+3.
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Four complex elements at a time are processed.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         *      ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m256 a = _mm256_load_ps((const float*)&input[i]);
        __m256 b = _mm256_load_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // | ar⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains four such partial sums.
    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    /* Sum the four partial sums: Add high half of vector sum to the low one, i.e.
     * s1 + s3 and s0 + s2 …
     */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // … and now (s0 + s2) + (s1 + s3)
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // Handle the last elements if num_points mod 4 is bigger than 0.
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_cmake(lv_creal(input[i]) * lv_creal(taps[i]) +
                                lv_cimag(input[i]) * lv_cimag(taps[i]),
                            lv_cimag(input[i]) * lv_creal(taps[i]) -
                                lv_creal(input[i]) * lv_cimag(taps[i]));
    }
}
#endif /* LV_HAVE_AVX */
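
/* Usage sketch (added for illustration): the _a_ kernels assume input and
 * taps are aligned to the machine's SIMD alignment. With the VOLK allocation
 * helpers and the generated dispatcher, which selects an aligned
 * implementation when the buffers allow it, that could look as follows; the
 * buffer length is an arbitrary example value.
 *
 * \code
 * #include <volk/volk.h>
 *
 * unsigned int n = 1024;
 * size_t alignment = volk_get_alignment();
 * lv_32fc_t* in = (lv_32fc_t*)volk_malloc(n * sizeof(lv_32fc_t), alignment);
 * lv_32fc_t* tp = (lv_32fc_t*)volk_malloc(n * sizeof(lv_32fc_t), alignment);
 * lv_32fc_t res;
 * // ... fill in and tp ...
 * volk_32fc_x2_conjugate_dot_prod_32fc(&res, in, tp, n);
 * volk_free(in);
 * volk_free(tp);
 * \endcode
 */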

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    // Partial sums for indices i and i+1.
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Two complex elements at a time are processed.
         * (ar + j⋅ai)*conj(br + j⋅bi) =
         *      ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
         */

        /* Load input and taps, split and duplicate real and imaginary parts of taps.
         * a:      | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
         * b:      | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
         * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
         * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
         */
        __m128 a = _mm_load_ps((const float*)&input[i]);
        __m128 b = _mm_load_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // Swap position of −ar⋅bi and ai⋅bi.
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
    // | ar⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains two such partial sums.
    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
    // Sum the two partial sums.
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
    // Store result.
    _mm_storel_pi((__m64*)result, sum);

    // Handle the last element if num_points mod 2 is 1.
    if (num_points & 1u) {
        *result += lv_cmake(
            lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
                lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
            lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
                lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
    }
}

#endif /*LV_HAVE_SSE3*/

#ifdef LV_HAVE_RVV
#include <riscv_vector.h>
#include <volk/volk_rvv_intrinsics.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_rvv(lv_32fc_t* result,
                                                            const lv_32fc_t* input,
                                                            const lv_32fc_t* taps,
                                                            unsigned int num_points)
{
    vfloat32m2_t vsumr = __riscv_vfmv_v_f_f32m2(0, __riscv_vsetvlmax_e32m2());
    vfloat32m2_t vsumi = vsumr;
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, input += vl, taps += vl) {
        vl = __riscv_vsetvl_e32m2(n);
        // Load complex samples as 64-bit words, then narrow to split the
        // real (low 32 bits) and imaginary (high 32 bits) parts.
        vuint64m4_t va = __riscv_vle64_v_u64m4((const uint64_t*)input, vl);
        vuint64m4_t vb = __riscv_vle64_v_u64m4((const uint64_t*)taps, vl);
        vfloat32m2_t var = __riscv_vreinterpret_f32m2(__riscv_vnsrl(va, 0, vl));
        vfloat32m2_t vbr = __riscv_vreinterpret_f32m2(__riscv_vnsrl(vb, 0, vl));
        vfloat32m2_t vai = __riscv_vreinterpret_f32m2(__riscv_vnsrl(va, 32, vl));
        vfloat32m2_t vbi = __riscv_vreinterpret_f32m2(__riscv_vnsrl(vb, 32, vl));
        // Negate the taps' imaginary part so the multiplies below compute
        // input * conj(taps).
        vbi = __riscv_vfneg(vbi, vl);
        vfloat32m2_t vr = __riscv_vfnmsac(__riscv_vfmul(var, vbr, vl), vai, vbi, vl);
        vfloat32m2_t vi = __riscv_vfmacc(__riscv_vfmul(var, vbi, vl), vai, vbr, vl);
        vsumr = __riscv_vfadd_tu(vsumr, vsumr, vr, vl);
        vsumi = __riscv_vfadd_tu(vsumi, vsumi, vi, vl);
    }
    // Reduce the vector accumulators to a single complex result.
    size_t vl = __riscv_vsetvlmax_e32m1();
    vfloat32m1_t vr = RISCV_SHRINK2(vfadd, f, 32, vsumr);
    vfloat32m1_t vi = RISCV_SHRINK2(vfadd, f, 32, vsumi);
    vfloat32m1_t z = __riscv_vfmv_s_f_f32m1(0, vl);
    *result = lv_cmake(__riscv_vfmv_f(__riscv_vfredusum(vr, z, vl)),
                       __riscv_vfmv_f(__riscv_vfredusum(vi, z, vl)));
}
#endif /*LV_HAVE_RVV*/

#ifdef LV_HAVE_RVVSEG
#include <riscv_vector.h>
#include <volk/volk_rvv_intrinsics.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_rvvseg(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    vfloat32m2_t vsumr = __riscv_vfmv_v_f_f32m2(0, __riscv_vsetvlmax_e32m2());
    vfloat32m2_t vsumi = vsumr;
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, input += vl, taps += vl) {
        vl = __riscv_vsetvl_e32m2(n);
        // Segmented loads deinterleave the real and imaginary parts directly.
        vfloat32m2x2_t va = __riscv_vlseg2e32_v_f32m2x2((const float*)input, vl);
        vfloat32m2x2_t vb = __riscv_vlseg2e32_v_f32m2x2((const float*)taps, vl);
        vfloat32m2_t var = __riscv_vget_f32m2(va, 0), vai = __riscv_vget_f32m2(va, 1);
        vfloat32m2_t vbr = __riscv_vget_f32m2(vb, 0), vbi = __riscv_vget_f32m2(vb, 1);
        // Negate the taps' imaginary part so the multiplies below compute
        // input * conj(taps).
        vbi = __riscv_vfneg(vbi, vl);
        vfloat32m2_t vr = __riscv_vfnmsac(__riscv_vfmul(var, vbr, vl), vai, vbi, vl);
        vfloat32m2_t vi = __riscv_vfmacc(__riscv_vfmul(var, vbi, vl), vai, vbr, vl);
        vsumr = __riscv_vfadd_tu(vsumr, vsumr, vr, vl);
        vsumi = __riscv_vfadd_tu(vsumi, vsumi, vi, vl);
    }
    // Reduce the vector accumulators to a single complex result.
    size_t vl = __riscv_vsetvlmax_e32m1();
    vfloat32m1_t vr = RISCV_SHRINK2(vfadd, f, 32, vsumr);
    vfloat32m1_t vi = RISCV_SHRINK2(vfadd, f, 32, vsumi);
    vfloat32m1_t z = __riscv_vfmv_s_f_f32m1(0, vl);
    *result = lv_cmake(__riscv_vfmv_f(__riscv_vfredusum(vr, z, vl)),
                       __riscv_vfmv_f(__riscv_vfredusum(vi, z, vl)));
}
#endif /*LV_HAVE_RVVSEG*/

#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H*/