#ifndef INCLUDED_volk_16ic_x2_dot_prod_16ic_H
#define INCLUDED_volk_16ic_x2_dot_prod_16ic_H
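/*
 * Dot product (multiply-and-accumulate) of two vectors of 16-bit integer complex
 * samples (lv_16sc_t): result = sum over n of in_a[n] * in_b[n]. Each protokernel
 * below implements the same operation for a different instruction set; the
 * reference (generic) implementation saturates the accumulator with sat_adds16i
 * on every step.
 */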
#include <saturation_arithmetic.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>

#ifdef LV_HAVE_GENERIC

static inline void volk_16ic_x2_dot_prod_16ic_generic(lv_16sc_t* result,
                                                      const lv_16sc_t* in_a,
                                                      const lv_16sc_t* in_b,
                                                      unsigned int num_points)
{
    unsigned int n;
    result[0] = lv_cmake((int16_t)0, (int16_t)0);
    for (n = 0; n < num_points; n++) {
        // Complex multiply, then accumulate each component with 16-bit saturation.
        lv_16sc_t tmp = in_a[n] * in_b[n];
        result[0] = lv_cmake(sat_adds16i(lv_creal(result[0]), lv_creal(tmp)),
                             sat_adds16i(lv_cimag(result[0]), lv_cimag(tmp)));
    }
}

#endif /* LV_HAVE_GENERIC */
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_a_sse2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);
    const unsigned int sse_iters = num_points / 4;
    unsigned int number;
    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;

    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl,
        realcacc, imagcacc;
    __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];

    realcacc = _mm_setzero_si128();
    imagcacc = _mm_setzero_si128();

    mask_imag = _mm_set_epi8(
        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
    mask_real = _mm_set_epi8(
        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);
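    /* Each __m128i holds four complex samples laid out as
     * [a3.i, a3.r, a2.i, a2.r, a1.i, a1.r, a0.i, a0.r] in 16-bit lanes; mask_real
     * and mask_imag select the even (real) and odd (imaginary) lanes so the two
     * accumulators can be merged back into interleaved form.
     *
     * Complex multiply via lane shifts: c = a*b gives ar*br in the real lanes and
     * ai*bi in the imaginary lanes; shifting c right by one 16-bit lane lines up
     * ai*bi under ar*br, so a saturating subtract yields the real parts. Shifting
     * a and b left by one lane and multiplying gives the cross products ai*br and
     * ar*bi, whose saturating sum yields the imaginary parts. */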
    for (number = 0; number < sse_iters; number++) {
        a = _mm_load_si128((__m128i*)_in_a);
        b = _mm_load_si128((__m128i*)_in_b);

        c = _mm_mullo_epi16(a, b);
        c_sr = _mm_srli_si128(c, 2);
        real = _mm_subs_epi16(c, c_sr);

        b_sl = _mm_slli_si128(b, 2);
        a_sl = _mm_slli_si128(a, 2);

        imag1 = _mm_mullo_epi16(a, b_sl);
        imag2 = _mm_mullo_epi16(b, a_sl);
        imag = _mm_adds_epi16(imag1, imag2);

        realcacc = _mm_adds_epi16(realcacc, real);
        imagcacc = _mm_adds_epi16(imagcacc, imag);

        _in_a += 4;
        _in_b += 4;
    }

    realcacc = _mm_and_si128(realcacc, mask_real);
    imagcacc = _mm_and_si128(imagcacc, mask_imag);

    a = _mm_or_si128(realcacc, imagcacc);
    _mm_store_si128((__m128i*)dotProductVector, a);

    // Reduce the four partial complex sums with 16-bit saturation.
    for (number = 0; number < 4; ++number) {
        dotProduct = lv_cmake(
            sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
            sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
    }

    // Handle the remaining (num_points % 4) samples.
    for (number = 0; number < (num_points % 4); ++number) {
        lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
        dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                              sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
    }

    *_out = dotProduct;
}
#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);
    const unsigned int sse_iters = num_points / 4;
    unsigned int number;
    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;

    __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl,
        realcacc, imagcacc, result;
    __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];

    realcacc = _mm_setzero_si128();
    imagcacc = _mm_setzero_si128();

    mask_imag = _mm_set_epi8(
        0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
    mask_real = _mm_set_epi8(
        0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);
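    /* Identical arithmetic to the aligned SSE2 kernel above; only the memory
     * accesses differ (_mm_loadu_si128 / _mm_storeu_si128 tolerate unaligned
     * buffers). */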
    for (number = 0; number < sse_iters; number++) {
        a = _mm_loadu_si128((__m128i*)_in_a);
        b = _mm_loadu_si128((__m128i*)_in_b);

        c = _mm_mullo_epi16(a, b);
        c_sr = _mm_srli_si128(c, 2);
        real = _mm_subs_epi16(c, c_sr);

        b_sl = _mm_slli_si128(b, 2);
        a_sl = _mm_slli_si128(a, 2);

        imag1 = _mm_mullo_epi16(a, b_sl);
        imag2 = _mm_mullo_epi16(b, a_sl);
        imag = _mm_adds_epi16(imag1, imag2);

        realcacc = _mm_adds_epi16(realcacc, real);
        imagcacc = _mm_adds_epi16(imagcacc, imag);

        _in_a += 4;
        _in_b += 4;
    }

    realcacc = _mm_and_si128(realcacc, mask_real);
    imagcacc = _mm_and_si128(imagcacc, mask_imag);

    result = _mm_or_si128(realcacc, imagcacc);
    _mm_storeu_si128((__m128i*)dotProductVector, result);

    // Reduce the four partial complex sums with 16-bit saturation.
    for (number = 0; number < 4; ++number) {
        dotProduct = lv_cmake(
            sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
            sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
    }

    // Handle the remaining (num_points % 4) samples.
    for (number = 0; number < (num_points % 4); ++number) {
        lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
        dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                              sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
    }

    *_out = dotProduct;
}
#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_AVX2
#include <immintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_u_avx2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);
    const unsigned int avx_iters = num_points / 8;
    unsigned int number;
    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;

    __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl,
        realcacc, imagcacc, result;
    __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];

    realcacc = _mm256_setzero_si256();
    imagcacc = _mm256_setzero_si256();

    mask_imag = _mm256_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
    mask_real = _mm256_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);
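    /* Same lane-shift trick as the SSE2 kernels, widened to 256 bits: eight complex
     * samples per iteration. _mm256_srli_si256 and _mm256_slli_si256 shift within
     * each 128-bit lane, which is sufficient because a real/imaginary pair never
     * crosses a lane boundary. */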
    for (number = 0; number < avx_iters; number++) {
        a = _mm256_loadu_si256((__m256i*)_in_a);
        b = _mm256_loadu_si256((__m256i*)_in_b);

        c = _mm256_mullo_epi16(a, b);
        c_sr = _mm256_srli_si256(c, 2);
        real = _mm256_subs_epi16(c, c_sr);

        b_sl = _mm256_slli_si256(b, 2);
        a_sl = _mm256_slli_si256(a, 2);

        imag1 = _mm256_mullo_epi16(a, b_sl);
        imag2 = _mm256_mullo_epi16(b, a_sl);
        imag = _mm256_adds_epi16(imag1, imag2);

        realcacc = _mm256_adds_epi16(realcacc, real);
        imagcacc = _mm256_adds_epi16(imagcacc, imag);

        _in_a += 8;
        _in_b += 8;
    }

    realcacc = _mm256_and_si256(realcacc, mask_real);
    imagcacc = _mm256_and_si256(imagcacc, mask_imag);

    result = _mm256_or_si256(realcacc, imagcacc);
    _mm256_storeu_si256((__m256i*)dotProductVector, result);

    // Reduce the eight partial complex sums with 16-bit saturation.
    for (number = 0; number < 8; ++number) {
        dotProduct = lv_cmake(
            sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
            sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
    }

    // Handle the remaining (num_points % 8) samples.
    for (number = 0; number < (num_points % 8); ++number) {
        lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
        dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                              sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
    }

    *_out = dotProduct;
}
#endif /* LV_HAVE_AVX2 */
#ifdef LV_HAVE_AVX2
#include <immintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_a_avx2(lv_16sc_t* out,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);
    const unsigned int avx_iters = num_points / 8;
    unsigned int number;
    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;

    __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl,
        realcacc, imagcacc, result;
    __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];

    realcacc = _mm256_setzero_si256();
    imagcacc = _mm256_setzero_si256();

    mask_imag = _mm256_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0,
                                0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
    mask_real = _mm256_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF,
                                0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);
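    /* Aligned variant: _mm256_load_si256 and _mm256_store_si256 require 32-byte
     * aligned buffers (the usual contract of an _a_ protokernel); otherwise this is
     * identical to the unaligned AVX2 kernel above. */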
    for (number = 0; number < avx_iters; number++) {
        a = _mm256_load_si256((__m256i*)_in_a);
        b = _mm256_load_si256((__m256i*)_in_b);

        c = _mm256_mullo_epi16(a, b);
        c_sr = _mm256_srli_si256(c, 2);
        real = _mm256_subs_epi16(c, c_sr);

        b_sl = _mm256_slli_si256(b, 2);
        a_sl = _mm256_slli_si256(a, 2);

        imag1 = _mm256_mullo_epi16(a, b_sl);
        imag2 = _mm256_mullo_epi16(b, a_sl);
        imag = _mm256_adds_epi16(imag1, imag2);

        realcacc = _mm256_adds_epi16(realcacc, real);
        imagcacc = _mm256_adds_epi16(imagcacc, imag);

        _in_a += 8;
        _in_b += 8;
    }

    realcacc = _mm256_and_si256(realcacc, mask_real);
    imagcacc = _mm256_and_si256(imagcacc, mask_imag);

    result = _mm256_or_si256(realcacc, imagcacc);
    _mm256_store_si256((__m256i*)dotProductVector, result);

    // Reduce the eight partial complex sums with 16-bit saturation.
    for (number = 0; number < 8; ++number) {
        dotProduct = lv_cmake(
            sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])),
            sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
    }

    // Handle the remaining (num_points % 8) samples.
    for (number = 0; number < (num_points % 8); ++number) {
        lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
        dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)),
                              sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
    }

    *_out = dotProduct;
}
#endif /* LV_HAVE_AVX2 */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16ic_x2_dot_prod_16ic_neon(lv_16sc_t* out,
                                                   const lv_16sc_t* in_a,
                                                   const lv_16sc_t* in_b,
                                                   unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;
    lv_16sc_t* a_ptr = (lv_16sc_t*)in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*)in_b;
    *out = lv_cmake((int16_t)0, (int16_t)0);

    if (quarter_points > 0) {
        int16x4x2_t a_val, b_val, c_val, accumulator;
        int16x4x2_t tmp_real, tmp_imag;
        __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];

        accumulator.val[0] = vdup_n_s16(0);
        accumulator.val[1] = vdup_n_s16(0);
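        /* vld2_s16 deinterleaves four complex samples on load: val[0] holds the
         * four real parts and val[1] the four imaginary parts, so the complex
         * product can be formed with plain lane-wise multiplies. */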
        for (number = 0; number < quarter_points; ++number) {
            a_val = vld2_s16((int16_t*)a_ptr);
            b_val = vld2_s16((int16_t*)b_ptr);
            __VOLK_PREFETCH(a_ptr + 8);
            __VOLK_PREFETCH(b_ptr + 8);

            // Real part: ar*br - ai*bi (saturating subtract).
            tmp_real.val[0] = vmul_s16(a_val.val[0], b_val.val[0]);
            tmp_real.val[1] = vmul_s16(a_val.val[1], b_val.val[1]);

            // Imaginary part: ar*bi + ai*br (saturating add).
            tmp_imag.val[0] = vmul_s16(a_val.val[0], b_val.val[1]);
            tmp_imag.val[1] = vmul_s16(a_val.val[1], b_val.val[0]);

            c_val.val[0] = vqsub_s16(tmp_real.val[0], tmp_real.val[1]);
            c_val.val[1] = vqadd_s16(tmp_imag.val[0], tmp_imag.val[1]);

            accumulator.val[0] = vqadd_s16(accumulator.val[0], c_val.val[0]);
            accumulator.val[1] = vqadd_s16(accumulator.val[1], c_val.val[1]);

            a_ptr += 4;
            b_ptr += 4;
        }

        vst2_s16((int16_t*)accum_result, accumulator);
        // Reduce the four partial complex sums with 16-bit saturation.
        for (number = 0; number < 4; ++number) {
            *out = lv_cmake(
                sat_adds16i(lv_creal(*out), lv_creal(accum_result[number])),
                sat_adds16i(lv_cimag(*out), lv_cimag(accum_result[number])));
        }
    }

    // Handle the remaining (num_points % 4) samples.
    for (number = quarter_points * 4; number < num_points; ++number) {
        *out += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16ic_x2_dot_prod_16ic_neon_vma(lv_16sc_t* out,
                                                       const lv_16sc_t* in_a,
                                                       const lv_16sc_t* in_b,
                                                       unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;
    lv_16sc_t* a_ptr = (lv_16sc_t*)in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*)in_b;
    __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];

    int16x4x2_t a_val, b_val, accumulator;
    int16x4x2_t tmp;

    accumulator.val[0] = vdup_n_s16(0);
    accumulator.val[1] = vdup_n_s16(0);
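    /* Multiply-accumulate formulation: vmls folds the -ai*bi term into the real
     * lane and vmla folds the +ar*bi term into the imaginary lane, replacing the
     * separate subtract/add of the plain NEON kernel. The intermediate products
     * wrap (no saturation); only the final vqadd into the accumulator saturates. */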
    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2_s16((int16_t*)a_ptr); // deinterleaved: val[0] = reals, val[1] = imags
        b_val = vld2_s16((int16_t*)b_ptr);

        tmp.val[0] = vmul_s16(a_val.val[0], b_val.val[0]);
        tmp.val[1] = vmul_s16(a_val.val[1], b_val.val[0]);

        // Finish the complex product with multiply-subtract / multiply-accumulate.
        tmp.val[0] = vmls_s16(tmp.val[0], a_val.val[1], b_val.val[1]);
        tmp.val[1] = vmla_s16(tmp.val[1], a_val.val[0], b_val.val[1]);

        accumulator.val[0] = vqadd_s16(accumulator.val[0], tmp.val[0]);
        accumulator.val[1] = vqadd_s16(accumulator.val[1], tmp.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }

    vst2_s16((int16_t*)accum_result, accumulator);
    *out = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // Handle the remaining (num_points % 4) samples.
    for (number = quarter_points * 4; number < num_points; ++number) {
        *out += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16ic_x2_dot_prod_16ic_neon_optvma(lv_16sc_t* out,
                                                          const lv_16sc_t* in_a,
                                                          const lv_16sc_t* in_b,
                                                          unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;
    lv_16sc_t* a_ptr = (lv_16sc_t*)in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*)in_b;
    __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];

    int16x4x2_t a_val, b_val, accumulator1, accumulator2;

    accumulator1.val[0] = vdup_n_s16(0);
    accumulator1.val[1] = vdup_n_s16(0);
    accumulator2.val[0] = vdup_n_s16(0);
    accumulator2.val[1] = vdup_n_s16(0);
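    /* Two accumulator pairs let all four partial products go straight into
     * vmla/vmls instructions; the pairs are combined once, with saturation, after
     * the loop. In-loop accumulation therefore wraps rather than saturating, the
     * trade-off this "optvma" variant makes for fewer instructions per sample. */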
    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2_s16((int16_t*)a_ptr); // deinterleaved: val[0] = reals, val[1] = imags
        b_val = vld2_s16((int16_t*)b_ptr);

        // accumulator1 collects ar*br and ar*bi; accumulator2 collects -ai*bi and ai*br.
        accumulator1.val[0] = vmla_s16(accumulator1.val[0], a_val.val[0], b_val.val[0]);
        accumulator2.val[0] = vmls_s16(accumulator2.val[0], a_val.val[1], b_val.val[1]);
        accumulator1.val[1] = vmla_s16(accumulator1.val[1], a_val.val[0], b_val.val[1]);
        accumulator2.val[1] = vmla_s16(accumulator2.val[1], a_val.val[1], b_val.val[0]);

        a_ptr += 4;
        b_ptr += 4;
    }

    accumulator1.val[0] = vqadd_s16(accumulator1.val[0], accumulator2.val[0]);
    accumulator1.val[1] = vqadd_s16(accumulator1.val[1], accumulator2.val[1]);

    vst2_s16((int16_t*)accum_result, accumulator1);
    *out = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // Handle the remaining (num_points % 4) samples.
    for (number = quarter_points * 4; number < num_points; ++number) {
        *out += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_RVV
#include <riscv_vector.h>
#include <volk/volk_rvv_intrinsics.h>

static inline void volk_16ic_x2_dot_prod_16ic_rvv(lv_16sc_t* result,
                                                  const lv_16sc_t* in_a,
                                                  const lv_16sc_t* in_b,
                                                  unsigned int num_points)
{
    vint16m4_t vsumr = __riscv_vmv_v_x_i16m4(0, __riscv_vsetvlmax_e16m4());
    vint16m4_t vsumi = vsumr;
    size_t n = num_points;
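    /* Each complex sample is loaded as a single 32-bit element, then narrowing
     * shifts split it back into 16-bit halves: vnsra by 0 extracts the real parts
     * (low halves) and vnsra by 16 the imaginary parts (high halves). The _tu
     * (tail-undisturbed) adds keep previously accumulated lanes intact when the
     * final iteration runs with a shorter vector length. */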
    for (size_t vl; n > 0; n -= vl, in_a += vl, in_b += vl) {
        vl = __riscv_vsetvl_e16m4(n);
        vint32m8_t va = __riscv_vle32_v_i32m8((const int32_t*)in_a, vl);
        vint32m8_t vb = __riscv_vle32_v_i32m8((const int32_t*)in_b, vl);
        vint16m4_t var = __riscv_vnsra(va, 0, vl), vai = __riscv_vnsra(va, 16, vl);
        vint16m4_t vbr = __riscv_vnsra(vb, 0, vl), vbi = __riscv_vnsra(vb, 16, vl);
        // vr = ar*br - ai*bi, vi = ar*bi + ai*br (wrapping 16-bit arithmetic)
        vint16m4_t vr = __riscv_vnmsac(__riscv_vmul(var, vbr, vl), vai, vbi, vl);
        vint16m4_t vi = __riscv_vmacc(__riscv_vmul(var, vbi, vl), vai, vbr, vl);
        vsumr = __riscv_vadd_tu(vsumr, vsumr, vr, vl);
        vsumi = __riscv_vadd_tu(vsumi, vsumi, vi, vl);
    }
    size_t vl = __riscv_vsetvlmax_e16m1();
    // Fold the m4 accumulators down to m1 before the final reduction.
    vint16m1_t vr = RISCV_SHRINK4(vadd, i, 16, vsumr);
    vint16m1_t vi = RISCV_SHRINK4(vadd, i, 16, vsumi);
    vint16m1_t z = __riscv_vmv_s_x_i16m1(0, vl);
    *result = lv_cmake(__riscv_vmv_x(__riscv_vredsum(vr, z, vl)),
                       __riscv_vmv_x(__riscv_vredsum(vi, z, vl)));
}
#endif /* LV_HAVE_RVV */
#ifdef LV_HAVE_RVVSEG
#include <riscv_vector.h>
#include <volk/volk_rvv_intrinsics.h>

static inline void volk_16ic_x2_dot_prod_16ic_rvvseg(lv_16sc_t* result,
                                                     const lv_16sc_t* in_a,
                                                     const lv_16sc_t* in_b,
                                                     unsigned int num_points)
{
    vint16m4_t vsumr = __riscv_vmv_v_x_i16m4(0, __riscv_vsetvlmax_e16m4());
    vint16m4_t vsumi = vsumr;
    size_t n = num_points;
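    /* Segmented-load variant: vlseg2e16 deinterleaves real and imaginary parts
     * directly into the two fields of a vint16m4x2_t, replacing the 32-bit loads
     * and narrowing shifts used by the plain RVV kernel above. */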
    for (size_t vl; n > 0; n -= vl, in_a += vl, in_b += vl) {
        vl = __riscv_vsetvl_e16m4(n);
        vint16m4x2_t va = __riscv_vlseg2e16_v_i16m4x2((const int16_t*)in_a, vl);
        vint16m4x2_t vb = __riscv_vlseg2e16_v_i16m4x2((const int16_t*)in_b, vl);
        vint16m4_t var = __riscv_vget_i16m4(va, 0), vai = __riscv_vget_i16m4(va, 1);
        vint16m4_t vbr = __riscv_vget_i16m4(vb, 0), vbi = __riscv_vget_i16m4(vb, 1);
        // vr = ar*br - ai*bi, vi = ar*bi + ai*br (wrapping 16-bit arithmetic)
        vint16m4_t vr = __riscv_vnmsac(__riscv_vmul(var, vbr, vl), vai, vbi, vl);
        vint16m4_t vi = __riscv_vmacc(__riscv_vmul(var, vbi, vl), vai, vbr, vl);
        vsumr = __riscv_vadd_tu(vsumr, vsumr, vr, vl);
        vsumi = __riscv_vadd_tu(vsumi, vsumi, vi, vl);
    }
    size_t vl = __riscv_vsetvlmax_e16m1();
    // Fold the m4 accumulators down to m1 before the final reduction.
    vint16m1_t vr = RISCV_SHRINK4(vadd, i, 16, vsumr);
    vint16m1_t vi = RISCV_SHRINK4(vadd, i, 16, vsumi);
    vint16m1_t z = __riscv_vmv_s_x_i16m1(0, vl);
    *result = lv_cmake(__riscv_vmv_x(__riscv_vredsum(vr, z, vl)),
                       __riscv_vmv_x(__riscv_vredsum(vi, z, vl)));
}
#endif /* LV_HAVE_RVVSEG */

#endif /* INCLUDED_volk_16ic_x2_dot_prod_16ic_H */