#ifndef INCLUDED_volk_32f_x3_sum_of_poly_32f_a_H
#define INCLUDED_volk_32f_x3_sum_of_poly_32f_a_H

#include <inttypes.h>
#include <volk/volk_common.h>

#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32f_x3_sum_of_poly_32f_a_sse3(float* target,
                                                      float* src0,
                                                      float* center_point_array,
                                                      float* cutoff,
                                                      unsigned int num_points)
{
    float result = 0.0f;
    float fst = 0.0f;
    float sq = 0.0f;
    float thrd = 0.0f;
    float frth = 0.0f;

    __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10;

    xmm9 = _mm_setzero_ps();
    xmm1 = _mm_setzero_ps();
    xmm0 = _mm_load1_ps(&center_point_array[0]);
    xmm6 = _mm_load1_ps(&center_point_array[1]);
    xmm7 = _mm_load1_ps(&center_point_array[2]);
    xmm8 = _mm_load1_ps(&center_point_array[3]);
    xmm10 = _mm_load1_ps(cutoff);

    int bound = num_points / 8;
    int leftovers = num_points - 8 * bound;
    int i = 0;

    for (; i < bound; ++i) {
        // first group of four inputs: clamp, then form x, x^2, x^3, x^4
        xmm2 = _mm_load_ps(src0);
        xmm2 = _mm_max_ps(xmm10, xmm2);
        xmm3 = _mm_mul_ps(xmm2, xmm2);
        xmm4 = _mm_mul_ps(xmm2, xmm3);
        xmm5 = _mm_mul_ps(xmm3, xmm3);

        // scale each power by its coefficient
        xmm2 = _mm_mul_ps(xmm2, xmm0);
        xmm3 = _mm_mul_ps(xmm3, xmm6);
        xmm4 = _mm_mul_ps(xmm4, xmm7);
        xmm5 = _mm_mul_ps(xmm5, xmm8);

        xmm2 = _mm_add_ps(xmm2, xmm3);
        xmm3 = _mm_add_ps(xmm4, xmm5);

        src0 += 4;

        xmm9 = _mm_add_ps(xmm2, xmm9);
        xmm9 = _mm_add_ps(xmm3, xmm9);

        // second group of four inputs, kept in an independent accumulator
        xmm2 = _mm_load_ps(src0);
        xmm2 = _mm_max_ps(xmm10, xmm2);
        xmm3 = _mm_mul_ps(xmm2, xmm2);
        xmm4 = _mm_mul_ps(xmm2, xmm3);
        xmm5 = _mm_mul_ps(xmm3, xmm3);

        xmm2 = _mm_mul_ps(xmm2, xmm0);
        xmm3 = _mm_mul_ps(xmm3, xmm6);
        xmm4 = _mm_mul_ps(xmm4, xmm7);
        xmm5 = _mm_mul_ps(xmm5, xmm8);

        xmm2 = _mm_add_ps(xmm2, xmm3);
        xmm3 = _mm_add_ps(xmm4, xmm5);

        src0 += 4;

        xmm1 = _mm_add_ps(xmm2, xmm1);
        xmm1 = _mm_add_ps(xmm3, xmm1);
    }

    // horizontally reduce both accumulators to a single scalar
    xmm2 = _mm_hadd_ps(xmm9, xmm1);
    xmm3 = _mm_hadd_ps(xmm2, xmm2);
    xmm4 = _mm_hadd_ps(xmm3, xmm3);

    _mm_store_ss(&result, xmm4);

    // scalar tail for the remaining (num_points % 8) inputs
    for (i = 0; i < leftovers; ++i) {
        fst = *src0++;
        fst = MAX(fst, *cutoff);
        sq = fst * fst;
        thrd = fst * sq;
        frth = sq * sq;

        result += (center_point_array[0] * fst + center_point_array[1] * sq +
                   center_point_array[2] * thrd + center_point_array[3] * frth);
    }

    result += (float)(num_points) * center_point_array[4];

    *target = result;
}

#endif /*LV_HAVE_SSE3*/
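/*
 * Usage sketch (illustrative, not part of this header): calling the kernel
 * through VOLK's dispatcher. The coefficient layout follows how every
 * variant in this file reads center_point_array: entries [0]..[3] multiply
 * x, x^2, x^3, x^4 and entry [4] is the constant term added once per point.
 *
 *   float* x = (float*)volk_malloc(sizeof(float) * 1024, volk_get_alignment());
 *   float poly[5] = { 1.0f, 0.5f, 0.25f, 0.125f, 2.0f };
 *   float cutoff = -3.0f;
 *   float result = 0.0f;
 *   // ... fill x ...
 *   volk_32f_x3_sum_of_poly_32f(&result, x, poly, &cutoff, 1024);
 *   volk_free(x);
 */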
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32f_x3_sum_of_poly_32f_a_avx2_fma(float* target,
                                                          float* src0,
                                                          float* center_point_array,
                                                          float* cutoff,
                                                          unsigned int num_points)
{
    const unsigned int eighth_points = num_points / 8;
    float fst = 0.0f;
    float sq = 0.0f;
    float thrd = 0.0f;
    float frth = 0.0f;

    __m256 cpa0, cpa1, cpa2, cpa3, cutoff_vec;
    __m256 target_vec;
    __m256 x_to_1, x_to_2, x_to_3, x_to_4;

    cpa0 = _mm256_set1_ps(center_point_array[0]);
    cpa1 = _mm256_set1_ps(center_point_array[1]);
    cpa2 = _mm256_set1_ps(center_point_array[2]);
    cpa3 = _mm256_set1_ps(center_point_array[3]);
    cutoff_vec = _mm256_set1_ps(*cutoff);
    target_vec = _mm256_setzero_ps();

    unsigned int i;
    for (i = 0; i < eighth_points; ++i) {
        x_to_1 = _mm256_load_ps(src0);
        x_to_1 = _mm256_max_ps(x_to_1, cutoff_vec);
        x_to_2 = _mm256_mul_ps(x_to_1, x_to_1); // x^2
        x_to_3 = _mm256_mul_ps(x_to_1, x_to_2); // x^3
        x_to_4 = _mm256_mul_ps(x_to_1, x_to_3); // x^4

        x_to_2 = _mm256_mul_ps(x_to_2, cpa1); // c1*x^2
        x_to_4 = _mm256_mul_ps(x_to_4, cpa3); // c3*x^4

        // fuse the odd-power terms into the even-power products
        x_to_1 = _mm256_fmadd_ps(x_to_1, cpa0, x_to_2); // c0*x   + c1*x^2
        x_to_3 = _mm256_fmadd_ps(x_to_3, cpa2, x_to_4); // c2*x^3 + c3*x^4

        target_vec = _mm256_add_ps(x_to_1, target_vec);
        target_vec = _mm256_add_ps(x_to_3, target_vec);

        src0 += 8;
    }

    // the hadd folds pairs within each 128-bit lane, so the partial sums
    // end up in elements 0, 1, 4 and 5
    __VOLK_ATTR_ALIGNED(32) float temp_results[8];
    target_vec = _mm256_hadd_ps(target_vec, target_vec);
    _mm256_store_ps(temp_results, target_vec);
    *target = temp_results[0] + temp_results[1] + temp_results[4] + temp_results[5];

    // scalar tail for the remaining (num_points % 8) inputs
    for (i = eighth_points * 8; i < num_points; ++i) {
        fst = *src0++;
        fst = MAX(fst, *cutoff);
        sq = fst * fst;
        thrd = fst * sq;
        frth = sq * sq;

        *target += (center_point_array[0] * fst + center_point_array[1] * sq +
                    center_point_array[2] * thrd + center_point_array[3] * frth);
    }

    *target += (float)(num_points) * center_point_array[4];
}
#endif // LV_HAVE_AVX && LV_HAVE_FMA
#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void volk_32f_x3_sum_of_poly_32f_a_avx(float* target,
                                                     float* src0,
                                                     float* center_point_array,
                                                     float* cutoff,
                                                     unsigned int num_points)
{
    const unsigned int eighth_points = num_points / 8;
    float fst = 0.0f;
    float sq = 0.0f;
    float thrd = 0.0f;
    float frth = 0.0f;

    __m256 cpa0, cpa1, cpa2, cpa3, cutoff_vec;
    __m256 target_vec;
    __m256 x_to_1, x_to_2, x_to_3, x_to_4;

    cpa0 = _mm256_set1_ps(center_point_array[0]);
    cpa1 = _mm256_set1_ps(center_point_array[1]);
    cpa2 = _mm256_set1_ps(center_point_array[2]);
    cpa3 = _mm256_set1_ps(center_point_array[3]);
    cutoff_vec = _mm256_set1_ps(*cutoff);
    target_vec = _mm256_setzero_ps();

    unsigned int i;
    for (i = 0; i < eighth_points; ++i) {
        x_to_1 = _mm256_load_ps(src0);
        x_to_1 = _mm256_max_ps(x_to_1, cutoff_vec);
        x_to_2 = _mm256_mul_ps(x_to_1, x_to_1); // x^2
        x_to_3 = _mm256_mul_ps(x_to_1, x_to_2); // x^3
        x_to_4 = _mm256_mul_ps(x_to_1, x_to_3); // x^4

        x_to_1 = _mm256_mul_ps(x_to_1, cpa0); // c0*x
        x_to_2 = _mm256_mul_ps(x_to_2, cpa1); // c1*x^2
        x_to_3 = _mm256_mul_ps(x_to_3, cpa2); // c2*x^3
        x_to_4 = _mm256_mul_ps(x_to_4, cpa3); // c3*x^4

        x_to_1 = _mm256_add_ps(x_to_1, x_to_2);
        x_to_3 = _mm256_add_ps(x_to_3, x_to_4);

        target_vec = _mm256_add_ps(x_to_1, target_vec);
        target_vec = _mm256_add_ps(x_to_3, target_vec);

        src0 += 8;
    }

    // partial sums land in elements 0, 1, 4 and 5 after the in-lane hadd
    __VOLK_ATTR_ALIGNED(32) float temp_results[8];
    target_vec = _mm256_hadd_ps(target_vec, target_vec);
    _mm256_store_ps(temp_results, target_vec);
    *target = temp_results[0] + temp_results[1] + temp_results[4] + temp_results[5];

    // scalar tail for the remaining (num_points % 8) inputs
    for (i = eighth_points * 8; i < num_points; ++i) {
        fst = *src0++;
        fst = MAX(fst, *cutoff);
        sq = fst * fst;
        thrd = fst * sq;
        frth = sq * sq;

        *target += (center_point_array[0] * fst + center_point_array[1] * sq +
                    center_point_array[2] * thrd + center_point_array[3] * frth);
    }

    *target += (float)(num_points) * center_point_array[4];
}
#endif // LV_HAVE_AVX
#ifdef LV_HAVE_GENERIC

static inline void volk_32f_x3_sum_of_poly_32f_generic(float* target,
                                                       float* src0,
                                                       float* center_point_array,
                                                       float* cutoff,
                                                       unsigned int num_points)
{
    const unsigned int eighth_points = num_points / 8;

    float result[8] = { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
    float fst = 0.0f;
    float sq = 0.0f;
    float thrd = 0.0f;
    float frth = 0.0f;

    unsigned int i = 0;
    unsigned int k = 0;
    for (i = 0; i < eighth_points; ++i) {
        // eight independent partial sums keep the adds out of one long chain
        for (k = 0; k < 8; ++k) {
            fst = *src0++;
            fst = MAX(fst, *cutoff);
            sq = fst * fst;
            thrd = fst * sq;
            frth = sq * sq;
            result[k] += center_point_array[0] * fst + center_point_array[1] * sq;
            result[k] += center_point_array[2] * thrd + center_point_array[3] * frth;
        }
    }
    for (k = 0; k < 8; k += 2) {
        result[k] = result[k] + result[k + 1];
    }

    *target = result[0] + result[2] + result[4] + result[6];

    // scalar tail for the remaining (num_points % 8) inputs
    for (i = eighth_points * 8; i < num_points; ++i) {
        fst = *src0++;
        fst = MAX(fst, *cutoff);
        sq = fst * fst;
        thrd = fst * sq;
        frth = sq * sq;
        *target += (center_point_array[0] * fst + center_point_array[1] * sq +
                    center_point_array[2] * thrd + center_point_array[3] * frth);
    }
    *target += (float)(num_points) * center_point_array[4];
}
#endif /*LV_HAVE_GENERIC*/
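/*
 * What every variant in this file computes, restated from the generic code
 * above (c[] is center_point_array, and x_i' = max(x_i, *cutoff)):
 *
 *   *target = sum_{i=0}^{num_points-1} ( c[0]*x_i'   + c[1]*x_i'^2
 *                                      + c[2]*x_i'^3 + c[3]*x_i'^4 )
 *             + num_points * c[4]
 */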
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32f_x3_sum_of_poly_32f_a_neon(float* __restrict target,
                                                      float* __restrict src0,
                                                      float* __restrict center_point_array,
                                                      float* __restrict cutoff,
                                                      unsigned int num_points)
{
    unsigned int i;
    float zero[4] = { 0.0f, 0.0f, 0.0f, 0.0f };

    float32x2_t x_to_1, x_to_2, x_to_3, x_to_4;
    float32x2_t cutoff_vector;
    float32x2x2_t x_low, x_high;
    float32x4_t x_qvector, c_qvector, cpa_qvector;
    float accumulator;
    float res_accumulators[4];

    c_qvector = vld1q_f32(zero);
    // duplicate the cutoff into a vector for the clamp
    cutoff_vector = vdup_n_f32(*cutoff);
    // load the first four polynomial coefficients
    cpa_qvector = vld1q_f32(center_point_array);

    for (i = 0; i < num_points; ++i) {
        // load x into both lanes, clamp, and form the powers
        x_to_1 = vdup_n_f32(*src0++);
        x_to_1 = vmax_f32(x_to_1, cutoff_vector);
        x_to_2 = vmul_f32(x_to_1, x_to_1);
        x_to_3 = vmul_f32(x_to_2, x_to_1);
        x_to_4 = vmul_f32(x_to_3, x_to_1);

        // interleave to gather {x, x^2, x^3, x^4} into one quad vector
        x_low = vzip_f32(x_to_1, x_to_2);
        x_high = vzip_f32(x_to_3, x_to_4);

        x_qvector = vcombine_f32(x_low.val[0], x_high.val[0]);
        // multiply by the coefficients and accumulate
        c_qvector = vmlaq_f32(c_qvector, x_qvector, cpa_qvector);
    }
    // dump the accumulator vector and sum its lanes
    vst1q_f32(res_accumulators, c_qvector);
    accumulator = res_accumulators[0] + res_accumulators[1] + res_accumulators[2] +
                  res_accumulators[3];

    *target = accumulator + (float)num_points * center_point_array[4];
}
#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_NEON

static inline void volk_32f_x3_sum_of_poly_32f_neonvert(float* __restrict target,
                                                        float* __restrict src0,
                                                        float* __restrict center_point_array,
                                                        float* __restrict cutoff,
                                                        unsigned int num_points)
{
    unsigned int i;
    float zero[4] = { 0.0f, 0.0f, 0.0f, 0.0f };

    float accumulator;
    float res_accumulators[4];

    float32x4_t accumulator1_vec, accumulator2_vec, accumulator3_vec, accumulator4_vec;
    accumulator1_vec = vld1q_f32(zero);
    accumulator2_vec = vld1q_f32(zero);
    accumulator3_vec = vld1q_f32(zero);
    accumulator4_vec = vld1q_f32(zero);
    float32x4_t x_to_1, x_to_2, x_to_3, x_to_4;
    float32x4_t cutoff_vector, cpa_0, cpa_1, cpa_2, cpa_3;

    // duplicate the cutoff and each coefficient across full quad vectors
    cutoff_vector = vdupq_n_f32(*cutoff);
    cpa_0 = vdupq_n_f32(center_point_array[0]);
    cpa_1 = vdupq_n_f32(center_point_array[1]);
    cpa_2 = vdupq_n_f32(center_point_array[2]);
    cpa_3 = vdupq_n_f32(center_point_array[3]);

    for (i = 0; i < num_points / 4; ++i) {
        // load four input points per iteration
        x_to_1 = vld1q_f32(src0);
        src0 += 4;

        x_to_1 = vmaxq_f32(x_to_1, cutoff_vector);
        x_to_2 = vmulq_f32(x_to_1, x_to_1);
        x_to_3 = vmulq_f32(x_to_2, x_to_1);
        x_to_4 = vmulq_f32(x_to_3, x_to_1);
        x_to_1 = vmulq_f32(x_to_1, cpa_0);
        x_to_2 = vmulq_f32(x_to_2, cpa_1);
        x_to_3 = vmulq_f32(x_to_3, cpa_2);
        x_to_4 = vmulq_f32(x_to_4, cpa_3);
        accumulator1_vec = vaddq_f32(accumulator1_vec, x_to_1);
        accumulator2_vec = vaddq_f32(accumulator2_vec, x_to_2);
        accumulator3_vec = vaddq_f32(accumulator3_vec, x_to_3);
        accumulator4_vec = vaddq_f32(accumulator4_vec, x_to_4);
    }

    // fold the four accumulators together, then sum the lanes
    accumulator1_vec = vaddq_f32(accumulator1_vec, accumulator2_vec);
    accumulator3_vec = vaddq_f32(accumulator3_vec, accumulator4_vec);
    accumulator1_vec = vaddq_f32(accumulator1_vec, accumulator3_vec);

    vst1q_f32(res_accumulators, accumulator1_vec);
    accumulator = res_accumulators[0] + res_accumulators[1] + res_accumulators[2] +
                  res_accumulators[3];

    // scalar tail for the remaining (num_points % 4) inputs
    float fst = 0.0f;
    float sq = 0.0f;
    float thrd = 0.0f;
    float frth = 0.0f;
    for (i = 4 * (num_points / 4); i < num_points; ++i) {
        fst = *src0++;
        fst = MAX(fst, *cutoff);
        sq = fst * fst;
        thrd = fst * sq;
        frth = sq * sq;

        accumulator += (center_point_array[0] * fst + center_point_array[1] * sq +
                        center_point_array[2] * thrd + center_point_array[3] * frth);
    }

    *target = accumulator + (float)num_points * center_point_array[4];
}
#endif /* LV_HAVE_NEON */

#endif /*INCLUDED_volk_32f_x3_sum_of_poly_32f_a_H*/
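/*
 * Design note (illustrative, not from the original source): _a_neon above
 * evaluates one point per iteration with a single fused multiply-accumulate
 * across {x, x^2, x^3, x^4}, while _neonvert processes four points per
 * iteration and keeps four independent accumulators so the adds do not
 * serialize on one register. The scalar analogue of that second trick:
 *
 *   // four partial sums break the floating-point add dependency chain
 *   float s0 = 0.0f, s1 = 0.0f, s2 = 0.0f, s3 = 0.0f;
 *   for (unsigned int j = 0; j + 4 <= n; j += 4) {
 *       s0 += x[j];
 *       s1 += x[j + 1];
 *       s2 += x[j + 2];
 *       s3 += x[j + 3];
 *   }
 *   float s = (s0 + s1) + (s2 + s3); // reassociated, so rounding may differ
 */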
#ifndef INCLUDED_volk_32f_x3_sum_of_poly_32f_u_H
#define INCLUDED_volk_32f_x3_sum_of_poly_32f_u_H

#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32f_x3_sum_of_poly_32f_u_avx_fma(float* target,
                                                         float* src0,
                                                         float* center_point_array,
                                                         float* cutoff,
                                                         unsigned int num_points)
{
    const unsigned int eighth_points = num_points / 8;
    float fst = 0.0f;
    float sq = 0.0f;
    float thrd = 0.0f;
    float frth = 0.0f;

    __m256 cpa0, cpa1, cpa2, cpa3, cutoff_vec;
    __m256 target_vec;
    __m256 x_to_1, x_to_2, x_to_3, x_to_4;

    cpa0 = _mm256_set1_ps(center_point_array[0]);
    cpa1 = _mm256_set1_ps(center_point_array[1]);
    cpa2 = _mm256_set1_ps(center_point_array[2]);
    cpa3 = _mm256_set1_ps(center_point_array[3]);
    cutoff_vec = _mm256_set1_ps(*cutoff);
    target_vec = _mm256_setzero_ps();

    unsigned int i;
    for (i = 0; i < eighth_points; ++i) {
        // unaligned load, then clamp and form the powers of x
        x_to_1 = _mm256_loadu_ps(src0);
        x_to_1 = _mm256_max_ps(x_to_1, cutoff_vec);
        x_to_2 = _mm256_mul_ps(x_to_1, x_to_1); // x^2
        x_to_3 = _mm256_mul_ps(x_to_1, x_to_2); // x^3
        x_to_4 = _mm256_mul_ps(x_to_1, x_to_3); // x^4

        x_to_2 = _mm256_mul_ps(x_to_2, cpa1); // c1*x^2
        x_to_4 = _mm256_mul_ps(x_to_4, cpa3); // c3*x^4

        x_to_1 = _mm256_fmadd_ps(x_to_1, cpa0, x_to_2); // c0*x   + c1*x^2
        x_to_3 = _mm256_fmadd_ps(x_to_3, cpa2, x_to_4); // c2*x^3 + c3*x^4

        target_vec = _mm256_add_ps(x_to_1, target_vec);
        target_vec = _mm256_add_ps(x_to_3, target_vec);

        src0 += 8;
    }

    // in-lane hadd leaves the partial sums in elements 0, 1, 4 and 5
    __VOLK_ATTR_ALIGNED(32) float temp_results[8];
    target_vec = _mm256_hadd_ps(target_vec, target_vec);
    _mm256_storeu_ps(temp_results, target_vec);
    *target = temp_results[0] + temp_results[1] + temp_results[4] + temp_results[5];

    // scalar tail for the remaining (num_points % 8) inputs
    for (i = eighth_points * 8; i < num_points; ++i) {
        fst = *src0++;
        fst = MAX(fst, *cutoff);
        sq = fst * fst;
        thrd = fst * sq;
        frth = sq * sq;

        *target += (center_point_array[0] * fst + center_point_array[1] * sq +
                    center_point_array[2] * thrd + center_point_array[3] * frth);
    }

    *target += (float)(num_points) * center_point_array[4];
}
#endif // LV_HAVE_AVX && LV_HAVE_FMA
#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void volk_32f_x3_sum_of_poly_32f_u_avx(float* target,
                                                     float* src0,
                                                     float* center_point_array,
                                                     float* cutoff,
                                                     unsigned int num_points)
{
    const unsigned int eighth_points = num_points / 8;
    float fst = 0.0f;
    float sq = 0.0f;
    float thrd = 0.0f;
    float frth = 0.0f;

    __m256 cpa0, cpa1, cpa2, cpa3, cutoff_vec;
    __m256 target_vec;
    __m256 x_to_1, x_to_2, x_to_3, x_to_4;

    cpa0 = _mm256_set1_ps(center_point_array[0]);
    cpa1 = _mm256_set1_ps(center_point_array[1]);
    cpa2 = _mm256_set1_ps(center_point_array[2]);
    cpa3 = _mm256_set1_ps(center_point_array[3]);
    cutoff_vec = _mm256_set1_ps(*cutoff);
    target_vec = _mm256_setzero_ps();

    unsigned int i;
    for (i = 0; i < eighth_points; ++i) {
        // unaligned load, then clamp and form the powers of x
        x_to_1 = _mm256_loadu_ps(src0);
        x_to_1 = _mm256_max_ps(x_to_1, cutoff_vec);
        x_to_2 = _mm256_mul_ps(x_to_1, x_to_1); // x^2
        x_to_3 = _mm256_mul_ps(x_to_1, x_to_2); // x^3
        x_to_4 = _mm256_mul_ps(x_to_1, x_to_3); // x^4

        x_to_1 = _mm256_mul_ps(x_to_1, cpa0); // c0*x
        x_to_2 = _mm256_mul_ps(x_to_2, cpa1); // c1*x^2
        x_to_3 = _mm256_mul_ps(x_to_3, cpa2); // c2*x^3
        x_to_4 = _mm256_mul_ps(x_to_4, cpa3); // c3*x^4

        x_to_1 = _mm256_add_ps(x_to_1, x_to_2);
        x_to_3 = _mm256_add_ps(x_to_3, x_to_4);

        target_vec = _mm256_add_ps(x_to_1, target_vec);
        target_vec = _mm256_add_ps(x_to_3, target_vec);

        src0 += 8;
    }

    // in-lane hadd leaves the partial sums in elements 0, 1, 4 and 5
    __VOLK_ATTR_ALIGNED(32) float temp_results[8];
    target_vec = _mm256_hadd_ps(target_vec, target_vec);
    _mm256_storeu_ps(temp_results, target_vec);
    *target = temp_results[0] + temp_results[1] + temp_results[4] + temp_results[5];

    // scalar tail for the remaining (num_points % 8) inputs
    for (i = eighth_points * 8; i < num_points; ++i) {
        fst = *src0++;
        fst = MAX(fst, *cutoff);
        sq = fst * fst;
        thrd = fst * sq;
        frth = sq * sq;

        *target += (center_point_array[0] * fst + center_point_array[1] * sq +
                    center_point_array[2] * thrd + center_point_array[3] * frth);
    }

    *target += (float)(num_points) * center_point_array[4];
}
#endif // LV_HAVE_AVX
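/*
 * Minimal self-check sketch (an assumption-laden example, not shipped code):
 * it calls the generic variant directly, so it needs no SIMD support. With
 * coefficients {1, 0, 0, 0, 0} the kernel reduces to a clamped sum.
 *
 *   #include <stdio.h>
 *   // assumes this header is on the include path:
 *   // #include <volk/volk_32f_x3_sum_of_poly_32f.h>
 *   int main(void) {
 *       float x[5] = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f };
 *       float poly[5] = { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f };
 *       float cutoff = 0.0f;
 *       float out = 0.0f;
 *       volk_32f_x3_sum_of_poly_32f_generic(&out, x, poly, &cutoff, 5);
 *       printf("%f\n", out); // expect 1.5
 *       return 0;
 *   }
 */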
#ifdef LV_HAVE_RVV
#include <riscv_vector.h>
#include <volk/volk_rvv_intrinsics.h>

static inline void volk_32f_x3_sum_of_poly_32f_rvv(float* target,
                                                   float* src0,
                                                   float* center_point_array,
                                                   float* cutoff,
                                                   unsigned int num_points)
{
    size_t vlmax = __riscv_vsetvlmax_e32m4();
    vfloat32m4_t vsum = __riscv_vfmv_v_f_f32m4(0, vlmax);
    float mul1 = center_point_array[0];
    float mul2 = center_point_array[1];
    vfloat32m4_t vmul3 = __riscv_vfmv_v_f_f32m4(center_point_array[2], vlmax);
    vfloat32m4_t vmul4 = __riscv_vfmv_v_f_f32m4(center_point_array[3], vlmax);
    vfloat32m4_t vmax = __riscv_vfmv_v_f_f32m4(*cutoff, vlmax);

    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, src0 += vl) {
        vl = __riscv_vsetvl_e32m4(n);
        vfloat32m4_t v = __riscv_vle32_v_f32m4(src0, vl);
        vfloat32m4_t v1 = __riscv_vfmax(v, vmax, vl); // clamp: x' = max(x, cutoff)
        vfloat32m4_t v2 = __riscv_vfmul(v1, v1, vl);  // x'^2
        vfloat32m4_t v3 = __riscv_vfmul(v1, v2, vl);  // x'^3
        vfloat32m4_t v4 = __riscv_vfmul(v2, v2, vl);  // x'^4
        v2 = __riscv_vfmul(v2, mul2, vl);             // c1*x'^2
        v4 = __riscv_vfmul(v4, vmul4, vl);            // c3*x'^4
        v1 = __riscv_vfmadd(v1, mul1, v2, vl);        // c0*x'   + c1*x'^2
        v3 = __riscv_vfmadd(v3, vmul3, v4, vl);       // c2*x'^3 + c3*x'^4
        v1 = __riscv_vfadd(v1, v3, vl);
        vsum = __riscv_vfadd_tu(vsum, vsum, v1, vl);  // tail-undisturbed accumulate
    }
    size_t vl = __riscv_vsetvlmax_e32m1();
    vfloat32m1_t v = RISCV_SHRINK4(vfadd, f, 32, vsum); // fold m4 down to m1
    vfloat32m1_t z = __riscv_vfmv_s_f_f32m1(0, vl);
    float sum = __riscv_vfmv_f(__riscv_vfredusum(v, z, vl));
    *target = sum + num_points * center_point_array[4];
}
#endif /*LV_HAVE_RVV*/

#endif /*INCLUDED_volk_32f_x3_sum_of_poly_32f_u_H*/