#ifndef INCLUDED_volk_32f_tanh_32f_a_H
#define INCLUDED_volk_32f_tanh_32f_a_H

#include <math.h>

#ifdef LV_HAVE_GENERIC

static inline void
volk_32f_tanh_32f_generic(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    float* cPtr = cVector;
    const float* aPtr = aVector;
    for (; number < num_points; number++) {
        *cPtr++ = tanhf(*aPtr++);
    }
}
#endif /* LV_HAVE_GENERIC */
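/*
 * The polynomial kernels below do not call tanhf(). They evaluate the
 * continued-fraction (Pade-style) rational approximation
 *
 *   tanh(x) ~= x * (135135 + 17325*x^2 + 378*x^4 + x^6)
 *              / (135135 + 62370*x^2 + 3150*x^4 + 28*x^6)
 *
 * which is where the constants 135135, 17325, 378, 62370, 3150 and 28 in
 * every variant come from.
 */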
#ifdef LV_HAVE_GENERIC

static inline void
volk_32f_tanh_32f_series(float* cVector, const float* aVector, unsigned int num_points)
{
    float* cPtr = cVector;
    const float* aPtr = aVector;
    for (unsigned int number = 0; number < num_points; number++) {
        if (*aPtr > 4.97)
            *cPtr++ = 1;
        else if (*aPtr <= -4.97)
            *cPtr++ = -1;
        else {
            float x2 = (*aPtr) * (*aPtr);
            float a = (*aPtr) * (135135.0f + x2 * (17325.0f + x2 * (378.0f + x2)));
            float b = 135135.0f + x2 * (62370.0f + x2 * (3150.0f + x2 * 28.0f));
            *cPtr++ = a / b;
        }
        aPtr++;
    }
}
#endif /* LV_HAVE_GENERIC */
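/*
 * The SIMD kernels that follow evaluate the same numerator and denominator
 * polynomials with nested multiply-adds (Horner's scheme) and one vector
 * division per block. Unlike the series version above, the SIMD paths shown
 * here do not clamp the result to +/-1, so inputs far outside the fit range
 * drift away from the true tanh value.
 */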
#ifdef LV_HAVE_SSE
#include <xmmintrin.h>

static inline void
volk_32f_tanh_32f_a_sse(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m128 aVal, cVal, x2, a, b;
    __m128 const1, const2, const3, const4, const5, const6;
    const1 = _mm_set_ps1(135135.0f);
    const2 = _mm_set_ps1(17325.0f);
    const3 = _mm_set_ps1(378.0f);
    const4 = _mm_set_ps1(62370.0f);
    const5 = _mm_set_ps1(3150.0f);
    const6 = _mm_set_ps1(28.0f);
    for (; number < quarterPoints; number++) {
        aVal = _mm_load_ps(aPtr);
        x2 = _mm_mul_ps(aVal, aVal);
        a = _mm_mul_ps(aVal, _mm_add_ps(const1, _mm_mul_ps(x2,
            _mm_add_ps(const2, _mm_mul_ps(x2, _mm_add_ps(const3, x2))))));
        b = _mm_add_ps(const1, _mm_mul_ps(x2, _mm_add_ps(const4,
            _mm_mul_ps(x2, _mm_add_ps(const5, _mm_mul_ps(x2, const6))))));
        cVal = _mm_div_ps(a, b);
        _mm_store_ps(cPtr, cVal);
        aPtr += 4;
        cPtr += 4;
    }

    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *cPtr++ = tanhf(*aPtr++);
    }
}
#endif /* LV_HAVE_SSE */
#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void
volk_32f_tanh_32f_a_avx(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_load_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        a = _mm256_mul_ps(aVal, _mm256_add_ps(const1, _mm256_mul_ps(x2,
            _mm256_add_ps(const2, _mm256_mul_ps(x2, _mm256_add_ps(const3, x2))))));
        b = _mm256_add_ps(const1, _mm256_mul_ps(x2, _mm256_add_ps(const4,
            _mm256_mul_ps(x2, _mm256_add_ps(const5, _mm256_mul_ps(x2, const6))))));
        cVal = _mm256_div_ps(a, b);
        _mm256_store_ps(cPtr, cVal);
        aPtr += 8;
        cPtr += 8;
    }

    number = eighthPoints * 8;
    for (; number < num_points; number++) {
        *cPtr++ = tanhf(*aPtr++);
    }
}
#endif /* LV_HAVE_AVX */
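/*
 * The AVX + FMA variant below computes the same Horner recurrences, but each
 * multiply/add pair is fused into a single _mm256_fmadd_ps, which saves
 * instructions and rounds once instead of twice per step.
 */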
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void
volk_32f_tanh_32f_a_avx_fma(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_load_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        a = _mm256_mul_ps(aVal, _mm256_fmadd_ps(
            x2, _mm256_fmadd_ps(x2, _mm256_add_ps(const3, x2), const2), const1));
        b = _mm256_fmadd_ps(
            x2, _mm256_fmadd_ps(x2, _mm256_fmadd_ps(x2, const6, const5), const4), const1);
        cVal = _mm256_div_ps(a, b);
        _mm256_store_ps(cPtr, cVal);
        aPtr += 8;
        cPtr += 8;
    }

    number = eighthPoints * 8;
    for (; number < num_points; number++) {
        *cPtr++ = tanhf(*aPtr++);
    }
}
#endif /* LV_HAVE_AVX && LV_HAVE_FMA */
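/*
 * End of the aligned (_a_) kernels; they expect SSE/AVX-aligned buffers,
 * while the _u_ kernels below use unaligned loads and stores. A minimal
 * usage sketch, assuming the usual VOLK allocation helpers and the generated
 * dispatcher (none of which are defined in this header):
 *
 *   float* in  = (float*)volk_malloc(N * sizeof(float), volk_get_alignment());
 *   float* out = (float*)volk_malloc(N * sizeof(float), volk_get_alignment());
 *   // ... fill in[0..N-1] ...
 *   volk_32f_tanh_32f(out, in, N);  // dispatcher picks the best kernel
 *   volk_free(in);
 *   volk_free(out);
 */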
#endif /* INCLUDED_volk_32f_tanh_32f_a_H */


#ifndef INCLUDED_volk_32f_tanh_32f_u_H
#define INCLUDED_volk_32f_tanh_32f_u_H

#include <math.h>
#ifdef LV_HAVE_SSE
#include <xmmintrin.h>

static inline void
volk_32f_tanh_32f_u_sse(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m128 aVal, cVal, x2, a, b;
    __m128 const1, const2, const3, const4, const5, const6;
    const1 = _mm_set_ps1(135135.0f);
    const2 = _mm_set_ps1(17325.0f);
    const3 = _mm_set_ps1(378.0f);
    const4 = _mm_set_ps1(62370.0f);
    const5 = _mm_set_ps1(3150.0f);
    const6 = _mm_set_ps1(28.0f);
    for (; number < quarterPoints; number++) {
        aVal = _mm_loadu_ps(aPtr);
        x2 = _mm_mul_ps(aVal, aVal);
        a = _mm_mul_ps(aVal, _mm_add_ps(const1, _mm_mul_ps(x2,
            _mm_add_ps(const2, _mm_mul_ps(x2, _mm_add_ps(const3, x2))))));
        b = _mm_add_ps(const1, _mm_mul_ps(x2, _mm_add_ps(const4,
            _mm_mul_ps(x2, _mm_add_ps(const5, _mm_mul_ps(x2, const6))))));
        cVal = _mm_div_ps(a, b);
        _mm_storeu_ps(cPtr, cVal);
        aPtr += 4;
        cPtr += 4;
    }

    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *cPtr++ = tanhf(*aPtr++);
    }
}
#endif /* LV_HAVE_SSE */
#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void
volk_32f_tanh_32f_u_avx(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_loadu_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        a = _mm256_mul_ps(aVal, _mm256_add_ps(const1, _mm256_mul_ps(x2,
            _mm256_add_ps(const2, _mm256_mul_ps(x2, _mm256_add_ps(const3, x2))))));
        b = _mm256_add_ps(const1, _mm256_mul_ps(x2, _mm256_add_ps(const4,
            _mm256_mul_ps(x2, _mm256_add_ps(const5, _mm256_mul_ps(x2, const6))))));
        cVal = _mm256_div_ps(a, b);
        _mm256_storeu_ps(cPtr, cVal);
        aPtr += 8;
        cPtr += 8;
    }

    number = eighthPoints * 8;
    for (; number < num_points; number++) {
        *cPtr++ = tanhf(*aPtr++);
    }
}
#endif /* LV_HAVE_AVX */
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void
volk_32f_tanh_32f_u_avx_fma(float* cVector, const float* aVector, unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, cVal, x2, a, b;
    __m256 const1, const2, const3, const4, const5, const6;
    const1 = _mm256_set1_ps(135135.0f);
    const2 = _mm256_set1_ps(17325.0f);
    const3 = _mm256_set1_ps(378.0f);
    const4 = _mm256_set1_ps(62370.0f);
    const5 = _mm256_set1_ps(3150.0f);
    const6 = _mm256_set1_ps(28.0f);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_loadu_ps(aPtr);
        x2 = _mm256_mul_ps(aVal, aVal);
        a = _mm256_mul_ps(aVal, _mm256_fmadd_ps(
            x2, _mm256_fmadd_ps(x2, _mm256_add_ps(const3, x2), const2), const1));
        b = _mm256_fmadd_ps(
            x2, _mm256_fmadd_ps(x2, _mm256_fmadd_ps(x2, const6, const5), const4), const1);
        cVal = _mm256_div_ps(a, b);
        _mm256_storeu_ps(cPtr, cVal);
        aPtr += 8;
        cPtr += 8;
    }

    number = eighthPoints * 8;
    for (; number < num_points; number++) {
        *cPtr++ = tanhf(*aPtr++);
    }
}
#endif /* LV_HAVE_AVX && LV_HAVE_FMA */
#ifdef LV_HAVE_RVV
#include <riscv_vector.h>

static inline void
volk_32f_tanh_32f_rvv(float* bVector, const float* aVector, unsigned int num_points)
{
    size_t vlmax = __riscv_vsetvlmax_e32m2();

    const vfloat32m2_t c1 = __riscv_vfmv_v_f_f32m2(135135.0f, vlmax);
    const vfloat32m2_t c2 = __riscv_vfmv_v_f_f32m2(17325.0f, vlmax);
    const vfloat32m2_t c3 = __riscv_vfmv_v_f_f32m2(378.0f, vlmax);
    const vfloat32m2_t c4 = __riscv_vfmv_v_f_f32m2(62370.0f, vlmax);
    const vfloat32m2_t c5 = __riscv_vfmv_v_f_f32m2(3150.0f, vlmax);
    const vfloat32m2_t c6 = __riscv_vfmv_v_f_f32m2(28.0f, vlmax);

    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, aVector += vl, bVector += vl) {
        vl = __riscv_vsetvl_e32m2(n);
        vfloat32m2_t x = __riscv_vle32_v_f32m2(aVector, vl);
        vfloat32m2_t xx = __riscv_vfmul(x, x, vl);
        /* numerator: x * (((xx + c3) * xx + c2) * xx + c1) */
        vfloat32m2_t a = __riscv_vfadd(xx, c3, vl);
        a = __riscv_vfmadd(a, xx, c2, vl);
        a = __riscv_vfmadd(a, xx, c1, vl);
        a = __riscv_vfmul(a, x, vl);
        /* denominator: ((c6 * xx + c5) * xx + c4) * xx + c1 */
        vfloat32m2_t b = c6;
        b = __riscv_vfmadd(b, xx, c5, vl);
        b = __riscv_vfmadd(b, xx, c4, vl);
        b = __riscv_vfmadd(b, xx, c1, vl);
        __riscv_vse32(bVector, __riscv_vfdiv(a, b, vl), vl);
    }
}
#endif /* LV_HAVE_RVV */

#endif /* INCLUDED_volk_32f_tanh_32f_u_H */