Vector Optimized Library of Kernels 3.2.0
Architecture-tuned implementations of math kernels
volk_16i_branch_4_state_8.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2014 Free Software Foundation, Inc.
 *
 * This file is part of VOLK
 *
 * SPDX-License-Identifier: LGPL-3.0-or-later
 */

#ifndef INCLUDED_volk_16i_branch_4_state_8_a_H
#define INCLUDED_volk_16i_branch_4_state_8_a_H

#include <inttypes.h>
#include <stdio.h>

#ifdef LV_HAVE_SSSE3

#include <emmintrin.h>
#include <tmmintrin.h>
#include <xmmintrin.h>

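/*
 * SSSE3 implementation. Builds four 8-element rows in target: each row is a
 * pshufb permutation of the eight shorts in src0, plus a row-specific bias
 * drawn from scalars[0] and scalars[1], plus the matching cntl2/cntl3 rows
 * used as AND-masks that enable scalars[2] and scalars[3] per element.
 */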
static inline void volk_16i_branch_4_state_8_a_ssse3(short* target,
                                                     short* src0,
                                                     char** permuters,
                                                     short* cntl2,
                                                     short* cntl3,
                                                     short* scalars)
{
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11;
    __m128i *p_target, *p_src0, *p_cntl2, *p_cntl3, *p_scalars;

    p_target = (__m128i*)target;
    p_src0 = (__m128i*)src0;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;

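    /* One aligned 16-byte load fetches all four scalars (so the buffer must
       be a full vector wide); shufflelo replicates word k across the low
       four words, and shuffle_epi32(..., 0x00) then spreads that dword over
       the whole register, broadcasting each scalar into xmm1..xmm4. */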
    xmm0 = _mm_load_si128(p_scalars);

    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);

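    /* Load the four 16-byte shuffle patterns, one per output row. */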
    xmm0 = _mm_load_si128((__m128i*)permuters[0]);
    xmm6 = _mm_load_si128((__m128i*)permuters[1]);
    xmm8 = _mm_load_si128((__m128i*)permuters[2]);
    xmm10 = _mm_load_si128((__m128i*)permuters[3]);

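    /* Apply every pattern to the same source vector: xmm0, xmm6, xmm8 and
       xmm10 now hold the four permuted rows. */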
    xmm5 = _mm_load_si128(p_src0);
    xmm0 = _mm_shuffle_epi8(xmm5, xmm0);
    xmm6 = _mm_shuffle_epi8(xmm5, xmm6);
    xmm8 = _mm_shuffle_epi8(xmm5, xmm8);
    xmm10 = _mm_shuffle_epi8(xmm5, xmm10);

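    /* Row-specific scalar biases: row 0 gets scalars[0] + scalars[1],
       row 1 gets scalars[1], row 2 gets scalars[0], row 3 gets neither. */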
    xmm5 = _mm_add_epi16(xmm1, xmm2);

    xmm6 = _mm_add_epi16(xmm2, xmm6);
    xmm8 = _mm_add_epi16(xmm1, xmm8);

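    /* The cntl2/cntl3 rows act as AND-masks that select scalars[2] and
       scalars[3] element by element; each masked pair is summed and then
       accumulated into its row. */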
    xmm7 = _mm_load_si128(p_cntl2);
    xmm9 = _mm_load_si128(p_cntl3);

    xmm0 = _mm_add_epi16(xmm5, xmm0);

    xmm7 = _mm_and_si128(xmm7, xmm3);
    xmm9 = _mm_and_si128(xmm9, xmm4);

    xmm5 = _mm_load_si128(&p_cntl2[1]);
    xmm11 = _mm_load_si128(&p_cntl3[1]);

    xmm7 = _mm_add_epi16(xmm7, xmm9);

    xmm5 = _mm_and_si128(xmm5, xmm3);
    xmm11 = _mm_and_si128(xmm11, xmm4);

    xmm0 = _mm_add_epi16(xmm0, xmm7);

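    /* The remaining rows repeat the mask-and-accumulate sequence, with each
       row's loads interleaved among the previous row's adds. */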
    xmm7 = _mm_load_si128(&p_cntl2[2]);
    xmm9 = _mm_load_si128(&p_cntl3[2]);

    xmm5 = _mm_add_epi16(xmm5, xmm11);

    xmm7 = _mm_and_si128(xmm7, xmm3);
    xmm9 = _mm_and_si128(xmm9, xmm4);

    xmm6 = _mm_add_epi16(xmm6, xmm5);

    xmm5 = _mm_load_si128(&p_cntl2[3]);
    xmm11 = _mm_load_si128(&p_cntl3[3]);

    xmm7 = _mm_add_epi16(xmm7, xmm9);

    xmm5 = _mm_and_si128(xmm5, xmm3);
    xmm11 = _mm_and_si128(xmm11, xmm4);

    xmm8 = _mm_add_epi16(xmm8, xmm7);

    xmm5 = _mm_add_epi16(xmm5, xmm11);

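    /* Write the four finished rows; the final add for row 3 is interleaved
       between the stores. */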
    _mm_store_si128(p_target, xmm0);
    _mm_store_si128(&p_target[1], xmm6);

    xmm10 = _mm_add_epi16(xmm5, xmm10);

    _mm_store_si128(&p_target[2], xmm8);

    _mm_store_si128(&p_target[3], xmm10);
}

#endif /*LV_HAVE_SSSE3*/

#ifdef LV_HAVE_GENERIC
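/*
 * Generic reference implementation. Computes the same four 8-element rows
 * as the SSSE3 version: each permuter entry is a byte index into src0, so
 * dividing it by two yields a short index; (i + 1) % 2 and (i >> 1) ^ 1
 * reproduce the per-row scalars[0]/scalars[1] biases.
 */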
static inline void volk_16i_branch_4_state_8_generic(short* target,
                                                     short* src0,
                                                     char** permuters,
                                                     short* cntl2,
                                                     short* cntl3,
                                                     short* scalars)
{
    int i = 0;

    int bound = 4;

    for (; i < bound; ++i) {
        for (int j = 0; j < 8; ++j) {
            target[i * 8 + j] = src0[((char)permuters[i][j * 2]) / 2] +
                                ((i + 1) % 2 * scalars[0]) +
                                (((i >> 1) ^ 1) * scalars[1]) +
                                (cntl2[i * 8 + j] & scalars[2]) +
                                (cntl3[i * 8 + j] & scalars[3]);
        }
    }
}

#endif /*LV_HAVE_GENERIC*/


#endif /*INCLUDED_volk_16i_branch_4_state_8_a_H*/
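/*
 * Usage sketch (illustrative; not part of the original file). From the code
 * above: target, cntl2 and cntl3 each span four rows of eight shorts (32
 * values), src0 holds eight shorts, every permuters[k] is a 16-byte shuffle
 * pattern, and scalars needs a full 16-byte vector even though only its
 * first four shorts are read. The "_a" suffix additionally requires 16-byte
 * alignment, which volk_malloc() provides. Fill values and patterns are the
 * caller's (e.g. a Viterbi-style decoder's); none are shown here.
 *
 *   size_t al = volk_get_alignment();
 *   short* target  = (short*)volk_malloc(32 * sizeof(short), al);
 *   short* src0    = (short*)volk_malloc(8 * sizeof(short), al);
 *   short* cntl2   = (short*)volk_malloc(32 * sizeof(short), al);
 *   short* cntl3   = (short*)volk_malloc(32 * sizeof(short), al);
 *   short* scalars = (short*)volk_malloc(8 * sizeof(short), al);
 *   char* permuters[4];
 *   for (int k = 0; k < 4; ++k)
 *       permuters[k] = (char*)volk_malloc(16, al);
 *
 *   // ... fill src0, scalars[0..3], the cntl masks and the patterns ...
 *
 *   volk_16i_branch_4_state_8_generic(target, src0, permuters,
 *                                     cntl2, cntl3, scalars);
 *   // On SSSE3 hardware, volk_16i_branch_4_state_8_a_ssse3 takes the
 *   // same arguments.
 *
 *   volk_free(target);
 *   // ... free the remaining buffers the same way ...
 */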