#ifndef INCLUDED_volk_32fc_x2_divide_32fc_u_H
#define INCLUDED_volk_32fc_x2_divide_32fc_u_H

#include <volk/volk_complex.h>

#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_divide_32fc_generic(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const lv_32fc_t* bVector,
                                                    unsigned int num_points)
{
    lv_32fc_t* cPtr = cVector;
    const lv_32fc_t* aPtr = aVector;
    const lv_32fc_t* bPtr = bVector;

    for (unsigned int number = 0; number < num_points; number++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
#endif /* LV_HAVE_GENERIC */
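/*
 * The SIMD kernels below all use the same identity to avoid a per-element
 * complex divide: a / b = (a * conj(b)) / |b|^2, where |b|^2 = re(b)^2 + im(b)^2
 * is real, so the division reduces to real divides (or one reciprocal multiply).
 * Worked example: (1+2i)/(3+4i) = (1+2i)(3-4i)/25 = (11+2i)/25 = 0.44+0.08i.
 */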
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <volk/volk_sse3_intrinsics.h>

static inline void volk_32fc_x2_divide_32fc_u_sse3(lv_32fc_t* cVector,
                                                   const lv_32fc_t* numeratorVector,
                                                   const lv_32fc_t* denumeratorVector,
                                                   unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m128 num01, num23, den01, den23, norm, result;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = numeratorVector;
    const lv_32fc_t* b = denumeratorVector;

    for (; number < quarterPoints; number++) {
        num01 = _mm_loadu_ps((float*)a); // first two complex values
        den01 = _mm_loadu_ps((float*)b);
        num01 = _mm_complexconjugatemul_ps(num01, den01); // a * conj(b)
        a += 2;
        b += 2;

        num23 = _mm_loadu_ps((float*)a); // next two complex values
        den23 = _mm_loadu_ps((float*)b);
        num23 = _mm_complexconjugatemul_ps(num23, den23);
        a += 2;
        b += 2;

        norm = _mm_magnitudesquared_ps_sse3(den01, den23); // |b|^2 for all four
        den01 = _mm_unpacklo_ps(norm, norm); // duplicate each |b|^2 over its re/im pair
        den23 = _mm_unpackhi_ps(norm, norm);

        result = _mm_div_ps(num01, den01);
        _mm_storeu_ps((float*)c, result);
        c += 2;
        result = _mm_div_ps(num23, den23);
        _mm_storeu_ps((float*)c, result);
        c += 2;
    }

    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *c++ = (*a++) / (*b++);
    }
}
#endif /* LV_HAVE_SSE3 */
#ifdef LV_HAVE_AVX
#include <immintrin.h>
#include <volk/volk_avx_intrinsics.h>

static inline void volk_32fc_x2_divide_32fc_u_avx(lv_32fc_t* cVector,
                                                  const lv_32fc_t* numeratorVector,
                                                  const lv_32fc_t* denumeratorVector,
                                                  unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 num, denum, mul_conj, sq, mag_sq, mag_sq_un, div;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = numeratorVector;
    const lv_32fc_t* b = denumeratorVector;

    for (; number < quarterPoints; number++) {
        num = _mm256_loadu_ps((float*)a); // four complex values: ar,ai,br,bi,...
        denum = _mm256_loadu_ps((float*)b);
        mul_conj = _mm256_complexconjugatemul_ps(num, denum); // a * conj(b)
        sq = _mm256_mul_ps(denum, denum);   // square each component of b
        mag_sq_un = _mm256_hadd_ps(sq, sq); // |b|^2 sums, lanes out of order
        mag_sq = _mm256_permute_ps(mag_sq_un, 0xd8); // pair each |b|^2 with its re/im
        div = _mm256_div_ps(mul_conj, mag_sq);

        _mm256_storeu_ps((float*)c, div);

        a += 4;
        b += 4;
        c += 4;
    }

    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *c++ = (*a++) / (*b++);
    }
}
#endif /* LV_HAVE_AVX */

#endif /* INCLUDED_volk_32fc_x2_divide_32fc_u_H */
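/*
 * Note: applications normally reach these kernels through the generated VOLK
 * dispatcher, which picks the best available implementation at runtime, e.g.:
 *
 *   volk_32fc_x2_divide_32fc(out, numerator, denominator, num_points);
 */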
#ifndef INCLUDED_volk_32fc_x2_divide_32fc_a_H
#define INCLUDED_volk_32fc_x2_divide_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>

#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <volk/volk_sse3_intrinsics.h>

static inline void volk_32fc_x2_divide_32fc_a_sse3(lv_32fc_t* cVector,
                                                   const lv_32fc_t* numeratorVector,
                                                   const lv_32fc_t* denumeratorVector,
                                                   unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m128 num01, num23, den01, den23, norm, result;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = numeratorVector;
    const lv_32fc_t* b = denumeratorVector;

    for (; number < quarterPoints; number++) {
        num01 = _mm_load_ps((float*)a); // aligned load, first two complex values
        den01 = _mm_load_ps((float*)b);
        num01 = _mm_complexconjugatemul_ps(num01, den01); // a * conj(b)
        a += 2;
        b += 2;

        num23 = _mm_load_ps((float*)a); // next two complex values
        den23 = _mm_load_ps((float*)b);
        num23 = _mm_complexconjugatemul_ps(num23, den23);
        a += 2;
        b += 2;

        norm = _mm_magnitudesquared_ps_sse3(den01, den23); // |b|^2 for all four
        den01 = _mm_unpacklo_ps(norm, norm); // duplicate each |b|^2 over its re/im pair
        den23 = _mm_unpackhi_ps(norm, norm);

        result = _mm_div_ps(num01, den01);
        _mm_store_ps((float*)c, result);
        c += 2;
        result = _mm_div_ps(num23, den23);
        _mm_store_ps((float*)c, result);
        c += 2;
    }

    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *c++ = (*a++) / (*b++);
    }
}
#endif /* LV_HAVE_SSE3 */
#ifdef LV_HAVE_AVX
#include <immintrin.h>
#include <volk/volk_avx_intrinsics.h>

static inline void volk_32fc_x2_divide_32fc_a_avx(lv_32fc_t* cVector,
                                                  const lv_32fc_t* numeratorVector,
                                                  const lv_32fc_t* denumeratorVector,
                                                  unsigned int num_points)
{
    const unsigned int eighthPoints = num_points / 8;

    __m256 num01, num23, denum01, denum23, complex_result, result0, result1;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = numeratorVector;
    const lv_32fc_t* b = denumeratorVector;

    for (unsigned int number = 0; number < eighthPoints; number++) {
        num01 = _mm256_load_ps((float*)a); // first four complex values
        denum01 = _mm256_load_ps((float*)b);
        num01 = _mm256_complexconjugatemul_ps(num01, denum01); // a * conj(b)
        a += 4;
        b += 4;

        num23 = _mm256_load_ps((float*)a); // next four complex values
        denum23 = _mm256_load_ps((float*)b);
        num23 = _mm256_complexconjugatemul_ps(num23, denum23);
        a += 4;
        b += 4;

        // |b|^2 terms for all eight values, interleaved per 128-bit lane
        complex_result = _mm256_hadd_ps(_mm256_mul_ps(denum01, denum01),
                                        _mm256_mul_ps(denum23, denum23));

        // broadcast each |b|^2 across its re/im pair (0x50: low half, 0xfa: high half)
        denum01 = _mm256_shuffle_ps(complex_result, complex_result, 0x50);
        denum23 = _mm256_shuffle_ps(complex_result, complex_result, 0xfa);

        result0 = _mm256_div_ps(num01, denum01);
        result1 = _mm256_div_ps(num23, denum23);

        _mm256_store_ps((float*)c, result0);
        c += 4;
        _mm256_store_ps((float*)c, result1);
        c += 4;
    }

    for (unsigned int number = eighthPoints * 8; number < num_points; number++) {
        *c++ = (*a++) / (*b++);
    }
}
#endif /* LV_HAVE_AVX */


#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_divide_32fc_neon(lv_32fc_t* cVector,
                                                 const lv_32fc_t* aVector,
                                                 const lv_32fc_t* bVector,
                                                 unsigned int num_points)
{
    lv_32fc_t* cPtr = cVector;
    const lv_32fc_t* aPtr = aVector;
    const lv_32fc_t* bPtr = bVector;
    float32x4x2_t aVal, bVal, cVal;
    float32x4_t bAbs, bAbsInv;

    const unsigned int quarterPoints = num_points / 4;
    unsigned int number = 0;
    for (; number < quarterPoints; number++) {
        aVal = vld2q_f32((const float*)(aPtr)); // deinterleave: re in val[0], im in val[1]
        bVal = vld2q_f32((const float*)(bPtr));
        aPtr += 4;
        bPtr += 4;
        __VOLK_PREFETCH(aPtr + 4);
        __VOLK_PREFETCH(bPtr + 4);

        // |b|^2 = br*br + bi*bi
        bAbs = vmulq_f32(bVal.val[0], bVal.val[0]);
        bAbs = vmlaq_f32(bAbs, bVal.val[1], bVal.val[1]);

        // approximate 1/|b|^2, refined below
        bAbsInv = vrecpeq_f32(bAbs);
        bAbsInv = vmulq_f32(bAbsInv, vrecpsq_f32(bAbsInv, bAbs));
        bAbsInv = vmulq_f32(bAbsInv, vrecpsq_f32(bAbsInv, bAbs));
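        /*
         * vrecpe gives only a rough reciprocal estimate; each vrecps step
         * computes (2 - d*x), so x * (2 - d*x) is one Newton-Raphson
         * iteration, roughly doubling the number of correct bits per step.
         */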
        // real part: (ar*br + ai*bi) * (1/|b|^2)
        cVal.val[0] = vmulq_f32(aVal.val[0], bVal.val[0]);
        cVal.val[0] = vmlaq_f32(cVal.val[0], aVal.val[1], bVal.val[1]);
        cVal.val[0] = vmulq_f32(cVal.val[0], bAbsInv);

        // imaginary part: (ai*br - ar*bi) * (1/|b|^2)
        cVal.val[1] = vmulq_f32(aVal.val[1], bVal.val[0]);
        cVal.val[1] = vmlsq_f32(cVal.val[1], aVal.val[0], bVal.val[1]);
        cVal.val[1] = vmulq_f32(cVal.val[1], bAbsInv);

        vst2q_f32((float*)(cPtr), cVal); // re-interleave and store
        cPtr += 4;
    }

    for (number = quarterPoints * 4; number < num_points; number++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
#endif /* LV_HAVE_NEON */
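/*
 * Note: because the NEON kernel multiplies by an approximate reciprocal rather
 * than performing a true divide, its results may differ from the generic
 * implementation in the last few bits.
 */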
#ifdef LV_HAVE_RVV
#include <riscv_vector.h>

static inline void volk_32fc_x2_divide_32fc_rvv(lv_32fc_t* cVector,
                                                const lv_32fc_t* aVector,
                                                const lv_32fc_t* bVector,
                                                unsigned int num_points)
{
    uint64_t* out = (uint64_t*)cVector;
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, aVector += vl, bVector += vl, out += vl) {
        vl = __riscv_vsetvl_e32m4(n);
        // load each complex value as one u64, then split re/im by narrowing shifts
        vuint64m8_t va = __riscv_vle64_v_u64m8((const uint64_t*)aVector, vl);
        vuint64m8_t vb = __riscv_vle64_v_u64m8((const uint64_t*)bVector, vl);
        vfloat32m4_t var = __riscv_vreinterpret_f32m4(__riscv_vnsrl(va, 0, vl));
        vfloat32m4_t vbr = __riscv_vreinterpret_f32m4(__riscv_vnsrl(vb, 0, vl));
        vfloat32m4_t vai = __riscv_vreinterpret_f32m4(__riscv_vnsrl(va, 32, vl));
        vfloat32m4_t vbi = __riscv_vreinterpret_f32m4(__riscv_vnsrl(vb, 32, vl));
        // mul = 1 / |b|^2
        vfloat32m4_t mul = __riscv_vfrdiv(
            __riscv_vfmacc(__riscv_vfmul(vbi, vbi, vl), vbr, vbr, vl), 1.0f, vl);
        // re(c) = (ar*br + ai*bi) / |b|^2
        vfloat32m4_t vr = __riscv_vfmul(
            __riscv_vfmacc(__riscv_vfmul(var, vbr, vl), vai, vbi, vl), mul, vl);
        // im(c) = (ai*br - ar*bi) / |b|^2
        vfloat32m4_t vi = __riscv_vfmul(
            __riscv_vfnmsac(__riscv_vfmul(vai, vbr, vl), var, vbi, vl), mul, vl);
        // re-interleave re/im back into u64 elements and store
        vuint32m4_t vru = __riscv_vreinterpret_u32m4(vr);
        vuint32m4_t viu = __riscv_vreinterpret_u32m4(vi);
        vuint64m8_t v =
            __riscv_vwmaccu(__riscv_vwaddu_vv(vru, viu, vl), 0xFFFFFFFF, viu, vl);
        __riscv_vse64(out, v, vl);
    }
}
#endif /* LV_HAVE_RVV */
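/*
 * The widening trick in the RVV kernel above rebuilds each complex value as
 * im<<32 | re: vwaddu yields (re + im) in 64 bits, and vwmaccu adds
 * im * 0xFFFFFFFF, so the total is re + im * 2^32.
 */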
#ifdef LV_HAVE_RVVSEG
#include <riscv_vector.h>

static inline void volk_32fc_x2_divide_32fc_rvvseg(lv_32fc_t* cVector,
                                                   const lv_32fc_t* aVector,
                                                   const lv_32fc_t* bVector,
                                                   unsigned int num_points)
{
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, aVector += vl, bVector += vl, cVector += vl) {
        vl = __riscv_vsetvl_e32m4(n);
        // segment loads deinterleave re/im directly
        vfloat32m4x2_t va = __riscv_vlseg2e32_v_f32m4x2((const float*)aVector, vl);
        vfloat32m4x2_t vb = __riscv_vlseg2e32_v_f32m4x2((const float*)bVector, vl);
        vfloat32m4_t var = __riscv_vget_f32m4(va, 0), vai = __riscv_vget_f32m4(va, 1);
        vfloat32m4_t vbr = __riscv_vget_f32m4(vb, 0), vbi = __riscv_vget_f32m4(vb, 1);
        // mul = 1 / |b|^2
        vfloat32m4_t mul = __riscv_vfrdiv(
            __riscv_vfmacc(__riscv_vfmul(vbi, vbi, vl), vbr, vbr, vl), 1.0f, vl);
        vfloat32m4_t vr = __riscv_vfmul(
            __riscv_vfmacc(__riscv_vfmul(var, vbr, vl), vai, vbi, vl), mul, vl);
        vfloat32m4_t vi = __riscv_vfmul(
            __riscv_vfnmsac(__riscv_vfmul(vai, vbr, vl), var, vbi, vl), mul, vl);
        __riscv_vsseg2e32_v_f32m4x2(
            (float*)cVector, __riscv_vcreate_v_f32m4x2(vr, vi), vl);
    }
}
#endif /* LV_HAVE_RVVSEG */

#endif /* INCLUDED_volk_32fc_x2_divide_32fc_a_H */