mbed TLS v2.16.11 - include/mbedtls/bn_mul.h
/*
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 *
 * This file is provided under the Apache License 2.0, or the
 * GNU General Public License v2.0 or later.
 *
 * **********
 * Apache License 2.0:
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * **********
 *
 * **********
 * GNU General Public License v2.0 or later:
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * **********
 */
/*
 * Multiply source vector [s] with b, add result
 * to destination vector [d] and set carry c.
 *
 * Currently supports:
 *
 *   . IA-32 (386+)       . AMD64 / EM64T
 *   . IA-32 (SSE2)       . Motorola 68000
 *   . PowerPC, 32-bit    . MicroBlaze
 *   . PowerPC, 64-bit    . TriCore
 *   . SPARC v8           . ARM v3+
 *   . Alpha              . MIPS32
 *   . C, longlong        . C, generic
 */
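
/*
 * Illustrative sketch (not part of the original header): how a caller such
 * as the multiplication helper in bignum.c typically wraps the MULADDC_*
 * macros. The macros expand to statements (inline assembly or plain C) that
 * read and update the local variables s, d, b and c (and t on some ports),
 * so those names must be in scope. The function name, the unrolling factor
 * and the final carry propagation below are simplified for illustration.
 */
#if 0
static void muladdc_sketch( size_t i, mbedtls_mpi_uint *s,
                            mbedtls_mpi_uint *d, mbedtls_mpi_uint b )
{
    mbedtls_mpi_uint c = 0, t = 0;   /* running carry and scratch limb */

    for( ; i > 0; i-- )
    {
        MULADDC_INIT                 /* load s, d, b, c into registers */
        MULADDC_CORE                 /* *d += *s * b + c; advance s, d */
        MULADDC_STOP                 /* write back updated c, d, s     */
    }

    (void) t;

    while( c != 0 )                  /* propagate the final carry      */
    {
        *d += c; c = ( *d < c ); d++;
    }
}
#endif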
#ifndef MBEDTLS_BN_MUL_H
#define MBEDTLS_BN_MUL_H

#if !defined(MBEDTLS_CONFIG_FILE)
#include "config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif

#include "bignum.h"

/*
 * Conversion macros for embedded constants:
 * build lists of mbedtls_mpi_uint's from lists of unsigned char's grouped by 8, 4 or 2
 */
#if defined(MBEDTLS_HAVE_INT32)

#define MBEDTLS_BYTES_TO_T_UINT_4( a, b, c, d ) \
    ( (mbedtls_mpi_uint) (a) <<  0 ) | \
    ( (mbedtls_mpi_uint) (b) <<  8 ) | \
    ( (mbedtls_mpi_uint) (c) << 16 ) | \
    ( (mbedtls_mpi_uint) (d) << 24 )

#define MBEDTLS_BYTES_TO_T_UINT_2( a, b ) \
    MBEDTLS_BYTES_TO_T_UINT_4( a, b, 0, 0 )

#define MBEDTLS_BYTES_TO_T_UINT_8( a, b, c, d, e, f, g, h ) \
    MBEDTLS_BYTES_TO_T_UINT_4( a, b, c, d ), \
    MBEDTLS_BYTES_TO_T_UINT_4( e, f, g, h )

#else /* 64-bits */

#define MBEDTLS_BYTES_TO_T_UINT_8( a, b, c, d, e, f, g, h ) \
    ( (mbedtls_mpi_uint) (a) <<  0 ) | \
    ( (mbedtls_mpi_uint) (b) <<  8 ) | \
    ( (mbedtls_mpi_uint) (c) << 16 ) | \
    ( (mbedtls_mpi_uint) (d) << 24 ) | \
    ( (mbedtls_mpi_uint) (e) << 32 ) | \
    ( (mbedtls_mpi_uint) (f) << 40 ) | \
    ( (mbedtls_mpi_uint) (g) << 48 ) | \
    ( (mbedtls_mpi_uint) (h) << 56 )

#define MBEDTLS_BYTES_TO_T_UINT_4( a, b, c, d ) \
    MBEDTLS_BYTES_TO_T_UINT_8( a, b, c, d, 0, 0, 0, 0 )

#define MBEDTLS_BYTES_TO_T_UINT_2( a, b ) \
    MBEDTLS_BYTES_TO_T_UINT_8( a, b, 0, 0, 0, 0, 0, 0 )

#endif /* bits in mbedtls_mpi_uint */
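
/*
 * Illustrative sketch (not part of the original header): these conversion
 * macros let constant tables (for example the curve constants in
 * ecp_curves.c) be written as little-endian byte lists that collapse into
 * mbedtls_mpi_uint limbs of whichever width the build uses. The array and
 * its values below are made up, shown only to demonstrate the grouping:
 * with 64-bit limbs each MBEDTLS_BYTES_TO_T_UINT_8 yields one limb, with
 * 32-bit limbs it expands to two comma-separated limbs.
 */
#if 0
static const mbedtls_mpi_uint example_limbs[] = {
    MBEDTLS_BYTES_TO_T_UINT_8( 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF ),
    MBEDTLS_BYTES_TO_T_UINT_8( 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00 ),
};
#endif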

#if defined(MBEDTLS_HAVE_ASM)

#ifndef asm
#define asm __asm
#endif

/* armcc5 --gnu defines __GNUC__ but doesn't support GNU's extended asm */
#if defined(__GNUC__) && \
    ( !defined(__ARMCC_VERSION) || __ARMCC_VERSION >= 6000000 )

/*
 * Disable use of the i386 assembly code below if option -O0, to disable all
 * compiler optimisations, is passed, detected with __OPTIMIZE__
 * This is done as the number of registers used in the assembly code doesn't
 * work with the -O0 option.
 */
#if defined(__i386__) && defined(__OPTIMIZE__)

#define MULADDC_INIT \
    asm( \
        "movl %%ebx, %0 \n\t" \
        "movl %5, %%esi \n\t" \
        "movl %6, %%edi \n\t" \
        "movl %7, %%ecx \n\t" \
        "movl %8, %%ebx \n\t"

#define MULADDC_CORE \
        "lodsl \n\t" \
        "mull %%ebx \n\t" \
        "addl %%ecx, %%eax \n\t" \
        "adcl $0, %%edx \n\t" \
        "addl (%%edi), %%eax \n\t" \
        "adcl $0, %%edx \n\t" \
        "movl %%edx, %%ecx \n\t" \
        "stosl \n\t"

#if defined(MBEDTLS_HAVE_SSE2)

#define MULADDC_HUIT \
        "movd %%ecx, %%mm1 \n\t" \
        "movd %%ebx, %%mm0 \n\t" \
        "movd (%%edi), %%mm3 \n\t" \
        "paddq %%mm3, %%mm1 \n\t" \
        "movd (%%esi), %%mm2 \n\t" \
        "pmuludq %%mm0, %%mm2 \n\t" \
        "movd 4(%%esi), %%mm4 \n\t" \
        "pmuludq %%mm0, %%mm4 \n\t" \
        "movd 8(%%esi), %%mm6 \n\t" \
        "pmuludq %%mm0, %%mm6 \n\t" \
        "movd 12(%%esi), %%mm7 \n\t" \
        "pmuludq %%mm0, %%mm7 \n\t" \
        "paddq %%mm2, %%mm1 \n\t" \
        "movd 4(%%edi), %%mm3 \n\t" \
        "paddq %%mm4, %%mm3 \n\t" \
        "movd 8(%%edi), %%mm5 \n\t" \
        "paddq %%mm6, %%mm5 \n\t" \
        "movd 12(%%edi), %%mm4 \n\t" \
        "paddq %%mm4, %%mm7 \n\t" \
        "movd %%mm1, (%%edi) \n\t" \
        "movd 16(%%esi), %%mm2 \n\t" \
        "pmuludq %%mm0, %%mm2 \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "movd 20(%%esi), %%mm4 \n\t" \
        "pmuludq %%mm0, %%mm4 \n\t" \
        "paddq %%mm3, %%mm1 \n\t" \
        "movd 24(%%esi), %%mm6 \n\t" \
        "pmuludq %%mm0, %%mm6 \n\t" \
        "movd %%mm1, 4(%%edi) \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "movd 28(%%esi), %%mm3 \n\t" \
        "pmuludq %%mm0, %%mm3 \n\t" \
        "paddq %%mm5, %%mm1 \n\t" \
        "movd 16(%%edi), %%mm5 \n\t" \
        "paddq %%mm5, %%mm2 \n\t" \
        "movd %%mm1, 8(%%edi) \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "paddq %%mm7, %%mm1 \n\t" \
        "movd 20(%%edi), %%mm5 \n\t" \
        "paddq %%mm5, %%mm4 \n\t" \
        "movd %%mm1, 12(%%edi) \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "paddq %%mm2, %%mm1 \n\t" \
        "movd 24(%%edi), %%mm5 \n\t" \
        "paddq %%mm5, %%mm6 \n\t" \
        "movd %%mm1, 16(%%edi) \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "paddq %%mm4, %%mm1 \n\t" \
        "movd 28(%%edi), %%mm5 \n\t" \
        "paddq %%mm5, %%mm3 \n\t" \
        "movd %%mm1, 20(%%edi) \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "paddq %%mm6, %%mm1 \n\t" \
        "movd %%mm1, 24(%%edi) \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "paddq %%mm3, %%mm1 \n\t" \
        "movd %%mm1, 28(%%edi) \n\t" \
        "addl $32, %%edi \n\t" \
        "addl $32, %%esi \n\t" \
        "psrlq $32, %%mm1 \n\t" \
        "movd %%mm1, %%ecx \n\t"

#define MULADDC_STOP \
        "emms \n\t" \
        "movl %4, %%ebx \n\t" \
        "movl %%ecx, %1 \n\t" \
        "movl %%edi, %2 \n\t" \
        "movl %%esi, %3 \n\t" \
        : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
        : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
        : "eax", "ebx", "ecx", "edx", "esi", "edi" \
    );

#else

#define MULADDC_STOP \
        "movl %4, %%ebx \n\t" \
        "movl %%ecx, %1 \n\t" \
        "movl %%edi, %2 \n\t" \
        "movl %%esi, %3 \n\t" \
        : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
        : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
        : "eax", "ebx", "ecx", "edx", "esi", "edi" \
    );
#endif /* SSE2 */
#endif /* i386 */

#if defined(__amd64__) || defined (__x86_64__)

#define MULADDC_INIT \
    asm( \
        "xorq %%r8, %%r8\n"

#define MULADDC_CORE \
        "movq (%%rsi), %%rax\n" \
        "mulq %%rbx\n" \
        "addq $8, %%rsi\n" \
        "addq %%rcx, %%rax\n" \
        "movq %%r8, %%rcx\n" \
        "adcq $0, %%rdx\n" \
        "nop \n" \
        "addq %%rax, (%%rdi)\n" \
        "adcq %%rdx, %%rcx\n" \
        "addq $8, %%rdi\n"

#define MULADDC_STOP \
        : "+c" (c), "+D" (d), "+S" (s) \
        : "b" (b) \
        : "rax", "rdx", "r8" \
    );

#endif /* AMD64 */

#if defined(__mc68020__) || defined(__mcpu32__)

#define MULADDC_INIT \
    asm( \
        "movl %3, %%a2 \n\t" \
        "movl %4, %%a3 \n\t" \
        "movl %5, %%d3 \n\t" \
        "movl %6, %%d2 \n\t" \
        "moveq #0, %%d0 \n\t"

#define MULADDC_CORE \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d4:%%d1 \n\t" \
        "addl %%d3, %%d1 \n\t" \
        "addxl %%d0, %%d4 \n\t" \
        "moveq #0, %%d3 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "addxl %%d4, %%d3 \n\t"

#define MULADDC_STOP \
        "movl %%d3, %0 \n\t" \
        "movl %%a3, %1 \n\t" \
        "movl %%a2, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "d0", "d1", "d2", "d3", "d4", "a2", "a3" \
    );

#define MULADDC_HUIT \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d4:%%d1 \n\t" \
        "addxl %%d3, %%d1 \n\t" \
        "addxl %%d0, %%d4 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d3:%%d1 \n\t" \
        "addxl %%d4, %%d1 \n\t" \
        "addxl %%d0, %%d3 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d4:%%d1 \n\t" \
        "addxl %%d3, %%d1 \n\t" \
        "addxl %%d0, %%d4 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d3:%%d1 \n\t" \
        "addxl %%d4, %%d1 \n\t" \
        "addxl %%d0, %%d3 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d4:%%d1 \n\t" \
        "addxl %%d3, %%d1 \n\t" \
        "addxl %%d0, %%d4 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d3:%%d1 \n\t" \
        "addxl %%d4, %%d1 \n\t" \
        "addxl %%d0, %%d3 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d4:%%d1 \n\t" \
        "addxl %%d3, %%d1 \n\t" \
        "addxl %%d0, %%d4 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "movel %%a2@+, %%d1 \n\t" \
        "mulul %%d2, %%d3:%%d1 \n\t" \
        "addxl %%d4, %%d1 \n\t" \
        "addxl %%d0, %%d3 \n\t" \
        "addl %%d1, %%a3@+ \n\t" \
        "addxl %%d0, %%d3 \n\t"

#endif /* MC68000 */

#if defined(__powerpc64__) || defined(__ppc64__)

#if defined(__MACH__) && defined(__APPLE__)

#define MULADDC_INIT \
    asm( \
        "ld r3, %3 \n\t" \
        "ld r4, %4 \n\t" \
        "ld r5, %5 \n\t" \
        "ld r6, %6 \n\t" \
        "addi r3, r3, -8 \n\t" \
        "addi r4, r4, -8 \n\t" \
        "addic r5, r5, 0 \n\t"

#define MULADDC_CORE \
        "ldu r7, 8(r3) \n\t" \
        "mulld r8, r7, r6 \n\t" \
        "mulhdu r9, r7, r6 \n\t" \
        "adde r8, r8, r5 \n\t" \
        "ld r7, 8(r4) \n\t" \
        "addze r5, r9 \n\t" \
        "addc r8, r8, r7 \n\t" \
        "stdu r8, 8(r4) \n\t"

#define MULADDC_STOP \
        "addze r5, r5 \n\t" \
        "addi r4, r4, 8 \n\t" \
        "addi r3, r3, 8 \n\t" \
        "std r5, %0 \n\t" \
        "std r4, %1 \n\t" \
        "std r3, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
    );


#else /* __MACH__ && __APPLE__ */

#define MULADDC_INIT \
    asm( \
        "ld %%r3, %3 \n\t" \
        "ld %%r4, %4 \n\t" \
        "ld %%r5, %5 \n\t" \
        "ld %%r6, %6 \n\t" \
        "addi %%r3, %%r3, -8 \n\t" \
        "addi %%r4, %%r4, -8 \n\t" \
        "addic %%r5, %%r5, 0 \n\t"

#define MULADDC_CORE \
        "ldu %%r7, 8(%%r3) \n\t" \
        "mulld %%r8, %%r7, %%r6 \n\t" \
        "mulhdu %%r9, %%r7, %%r6 \n\t" \
        "adde %%r8, %%r8, %%r5 \n\t" \
        "ld %%r7, 8(%%r4) \n\t" \
        "addze %%r5, %%r9 \n\t" \
        "addc %%r8, %%r8, %%r7 \n\t" \
        "stdu %%r8, 8(%%r4) \n\t"

#define MULADDC_STOP \
        "addze %%r5, %%r5 \n\t" \
        "addi %%r4, %%r4, 8 \n\t" \
        "addi %%r3, %%r3, 8 \n\t" \
        "std %%r5, %0 \n\t" \
        "std %%r4, %1 \n\t" \
        "std %%r3, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
    );

#endif /* __MACH__ && __APPLE__ */

#elif defined(__powerpc__) || defined(__ppc__) /* end PPC64/begin PPC32 */

#if defined(__MACH__) && defined(__APPLE__)

#define MULADDC_INIT \
    asm( \
        "lwz r3, %3 \n\t" \
        "lwz r4, %4 \n\t" \
        "lwz r5, %5 \n\t" \
        "lwz r6, %6 \n\t" \
        "addi r3, r3, -4 \n\t" \
        "addi r4, r4, -4 \n\t" \
        "addic r5, r5, 0 \n\t"

#define MULADDC_CORE \
        "lwzu r7, 4(r3) \n\t" \
        "mullw r8, r7, r6 \n\t" \
        "mulhwu r9, r7, r6 \n\t" \
        "adde r8, r8, r5 \n\t" \
        "lwz r7, 4(r4) \n\t" \
        "addze r5, r9 \n\t" \
        "addc r8, r8, r7 \n\t" \
        "stwu r8, 4(r4) \n\t"

#define MULADDC_STOP \
        "addze r5, r5 \n\t" \
        "addi r4, r4, 4 \n\t" \
        "addi r3, r3, 4 \n\t" \
        "stw r5, %0 \n\t" \
        "stw r4, %1 \n\t" \
        "stw r3, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
    );

#else /* __MACH__ && __APPLE__ */

#define MULADDC_INIT \
    asm( \
        "lwz %%r3, %3 \n\t" \
        "lwz %%r4, %4 \n\t" \
        "lwz %%r5, %5 \n\t" \
        "lwz %%r6, %6 \n\t" \
        "addi %%r3, %%r3, -4 \n\t" \
        "addi %%r4, %%r4, -4 \n\t" \
        "addic %%r5, %%r5, 0 \n\t"

#define MULADDC_CORE \
        "lwzu %%r7, 4(%%r3) \n\t" \
        "mullw %%r8, %%r7, %%r6 \n\t" \
        "mulhwu %%r9, %%r7, %%r6 \n\t" \
        "adde %%r8, %%r8, %%r5 \n\t" \
        "lwz %%r7, 4(%%r4) \n\t" \
        "addze %%r5, %%r9 \n\t" \
        "addc %%r8, %%r8, %%r7 \n\t" \
        "stwu %%r8, 4(%%r4) \n\t"

#define MULADDC_STOP \
        "addze %%r5, %%r5 \n\t" \
        "addi %%r4, %%r4, 4 \n\t" \
        "addi %%r3, %%r3, 4 \n\t" \
        "stw %%r5, %0 \n\t" \
        "stw %%r4, %1 \n\t" \
        "stw %%r3, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
    );

#endif /* __MACH__ && __APPLE__ */

#endif /* PPC32 */

/*
 * The Sparc(64) assembly is reported to be broken.
 * Disable it for now, until we're able to fix it.
 */
#if 0 && defined(__sparc__)
#if defined(__sparc64__)

#define MULADDC_INIT \
    asm( \
        "ldx %3, %%o0 \n\t" \
        "ldx %4, %%o1 \n\t" \
        "ld %5, %%o2 \n\t" \
        "ld %6, %%o3 \n\t"

#define MULADDC_CORE \
        "ld [%%o0], %%o4 \n\t" \
        "inc 4, %%o0 \n\t" \
        "ld [%%o1], %%o5 \n\t" \
        "umul %%o3, %%o4, %%o4 \n\t" \
        "addcc %%o4, %%o2, %%o4 \n\t" \
        "rd %%y, %%g1 \n\t" \
        "addx %%g1, 0, %%g1 \n\t" \
        "addcc %%o4, %%o5, %%o4 \n\t" \
        "st %%o4, [%%o1] \n\t" \
        "addx %%g1, 0, %%o2 \n\t" \
        "inc 4, %%o1 \n\t"

#define MULADDC_STOP \
        "st %%o2, %0 \n\t" \
        "stx %%o1, %1 \n\t" \
        "stx %%o0, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "g1", "o0", "o1", "o2", "o3", "o4", \
          "o5" \
    );

#else /* __sparc64__ */

#define MULADDC_INIT \
    asm( \
        "ld %3, %%o0 \n\t" \
        "ld %4, %%o1 \n\t" \
        "ld %5, %%o2 \n\t" \
        "ld %6, %%o3 \n\t"

#define MULADDC_CORE \
        "ld [%%o0], %%o4 \n\t" \
        "inc 4, %%o0 \n\t" \
        "ld [%%o1], %%o5 \n\t" \
        "umul %%o3, %%o4, %%o4 \n\t" \
        "addcc %%o4, %%o2, %%o4 \n\t" \
        "rd %%y, %%g1 \n\t" \
        "addx %%g1, 0, %%g1 \n\t" \
        "addcc %%o4, %%o5, %%o4 \n\t" \
        "st %%o4, [%%o1] \n\t" \
        "addx %%g1, 0, %%o2 \n\t" \
        "inc 4, %%o1 \n\t"

#define MULADDC_STOP \
        "st %%o2, %0 \n\t" \
        "st %%o1, %1 \n\t" \
        "st %%o0, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "g1", "o0", "o1", "o2", "o3", "o4", \
          "o5" \
    );

#endif /* __sparc64__ */
#endif /* __sparc__ */

#if defined(__microblaze__) || defined(microblaze)

#define MULADDC_INIT \
    asm( \
        "lwi r3, %3 \n\t" \
        "lwi r4, %4 \n\t" \
        "lwi r5, %5 \n\t" \
        "lwi r6, %6 \n\t" \
        "andi r7, r6, 0xffff \n\t" \
        "bsrli r6, r6, 16 \n\t"

#define MULADDC_CORE \
        "lhui r8, r3, 0 \n\t" \
        "addi r3, r3, 2 \n\t" \
        "lhui r9, r3, 0 \n\t" \
        "addi r3, r3, 2 \n\t" \
        "mul r10, r9, r6 \n\t" \
        "mul r11, r8, r7 \n\t" \
        "mul r12, r9, r7 \n\t" \
        "mul r13, r8, r6 \n\t" \
        "bsrli r8, r10, 16 \n\t" \
        "bsrli r9, r11, 16 \n\t" \
        "add r13, r13, r8 \n\t" \
        "add r13, r13, r9 \n\t" \
        "bslli r10, r10, 16 \n\t" \
        "bslli r11, r11, 16 \n\t" \
        "add r12, r12, r10 \n\t" \
        "addc r13, r13, r0 \n\t" \
        "add r12, r12, r11 \n\t" \
        "addc r13, r13, r0 \n\t" \
        "lwi r10, r4, 0 \n\t" \
        "add r12, r12, r10 \n\t" \
        "addc r13, r13, r0 \n\t" \
        "add r12, r12, r5 \n\t" \
        "addc r5, r13, r0 \n\t" \
        "swi r12, r4, 0 \n\t" \
        "addi r4, r4, 4 \n\t"

#define MULADDC_STOP \
        "swi r5, %0 \n\t" \
        "swi r4, %1 \n\t" \
        "swi r3, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "r3", "r4", "r5", "r6", "r7", "r8", \
          "r9", "r10", "r11", "r12", "r13" \
    );

#endif /* MicroBlaze */

#if defined(__tricore__)

#define MULADDC_INIT \
    asm( \
        "ld.a %%a2, %3 \n\t" \
        "ld.a %%a3, %4 \n\t" \
        "ld.w %%d4, %5 \n\t" \
        "ld.w %%d1, %6 \n\t" \
        "xor %%d5, %%d5 \n\t"

#define MULADDC_CORE \
        "ld.w %%d0, [%%a2+] \n\t" \
        "madd.u %%e2, %%e4, %%d0, %%d1 \n\t" \
        "ld.w %%d0, [%%a3] \n\t" \
        "addx %%d2, %%d2, %%d0 \n\t" \
        "addc %%d3, %%d3, 0 \n\t" \
        "mov %%d4, %%d3 \n\t" \
        "st.w [%%a3+], %%d2 \n\t"

#define MULADDC_STOP \
        "st.w %0, %%d4 \n\t" \
        "st.a %1, %%a3 \n\t" \
        "st.a %2, %%a2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "d0", "d1", "e2", "d4", "a2", "a3" \
    );

#endif /* TriCore */

/*
 * Note, gcc -O0 by default uses r7 for the frame pointer, so it complains about
 * our use of r7 below, unless -fomit-frame-pointer is passed.
 *
 * On the other hand, -fomit-frame-pointer is implied by any -Ox options with
 * x !=0, which we can detect using __OPTIMIZE__ (which is also defined by
 * clang and armcc5 under the same conditions).
 *
 * So, only use the optimized assembly below for optimized build, which avoids
 * the build error and is pretty reasonable anyway.
 */
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
#define MULADDC_CANNOT_USE_R7
#endif

#if defined(__arm__) && !defined(MULADDC_CANNOT_USE_R7)

#if defined(__thumb__) && !defined(__thumb2__)

#define MULADDC_INIT \
    asm( \
        "ldr r0, %3 \n\t" \
        "ldr r1, %4 \n\t" \
        "ldr r2, %5 \n\t" \
        "ldr r3, %6 \n\t" \
        "lsr r7, r3, #16 \n\t" \
        "mov r9, r7 \n\t" \
        "lsl r7, r3, #16 \n\t" \
        "lsr r7, r7, #16 \n\t" \
        "mov r8, r7 \n\t"

#define MULADDC_CORE \
        "ldmia r0!, {r6} \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "lsr r6, r6, #16 \n\t" \
        "mov r4, r8 \n\t" \
        "mul r4, r6 \n\t" \
        "mov r3, r9 \n\t" \
        "mul r6, r3 \n\t" \
        "mov r5, r9 \n\t" \
        "mul r5, r7 \n\t" \
        "mov r3, r8 \n\t" \
        "mul r7, r3 \n\t" \
        "lsr r3, r6, #16 \n\t" \
        "add r5, r5, r3 \n\t" \
        "lsr r3, r7, #16 \n\t" \
        "add r5, r5, r3 \n\t" \
        "add r4, r4, r2 \n\t" \
        "mov r2, #0 \n\t" \
        "adc r5, r2 \n\t" \
        "lsl r3, r6, #16 \n\t" \
        "add r4, r4, r3 \n\t" \
        "adc r5, r2 \n\t" \
        "lsl r3, r7, #16 \n\t" \
        "add r4, r4, r3 \n\t" \
        "adc r5, r2 \n\t" \
        "ldr r3, [r1] \n\t" \
        "add r4, r4, r3 \n\t" \
        "adc r2, r5 \n\t" \
        "stmia r1!, {r4} \n\t"

#define MULADDC_STOP \
        "str r2, %0 \n\t" \
        "str r1, %1 \n\t" \
        "str r0, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "r0", "r1", "r2", "r3", "r4", "r5", \
          "r6", "r7", "r8", "r9", "cc" \
    );

#elif (__ARM_ARCH >= 6) && \
    defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)

#define MULADDC_INIT \
    asm(

#define MULADDC_CORE \
        "ldr r0, [%0], #4 \n\t" \
        "ldr r1, [%1] \n\t" \
        "umaal r1, %2, %3, r0 \n\t" \
        "str r1, [%1], #4 \n\t"

#define MULADDC_STOP \
        : "=r" (s), "=r" (d), "=r" (c) \
        : "r" (b), "0" (s), "1" (d), "2" (c) \
        : "r0", "r1", "memory" \
    );

#else

#define MULADDC_INIT \
    asm( \
        "ldr r0, %3 \n\t" \
        "ldr r1, %4 \n\t" \
        "ldr r2, %5 \n\t" \
        "ldr r3, %6 \n\t"

#define MULADDC_CORE \
        "ldr r4, [r0], #4 \n\t" \
        "mov r5, #0 \n\t" \
        "ldr r6, [r1] \n\t" \
        "umlal r2, r5, r3, r4 \n\t" \
        "adds r7, r6, r2 \n\t" \
        "adc r2, r5, #0 \n\t" \
        "str r7, [r1], #4 \n\t"

#define MULADDC_STOP \
        "str r2, %0 \n\t" \
        "str r1, %1 \n\t" \
        "str r0, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "r0", "r1", "r2", "r3", "r4", "r5", \
          "r6", "r7", "cc" \
    );

#endif /* Thumb */

#endif /* ARMv3 */

#if defined(__alpha__)

#define MULADDC_INIT \
    asm( \
        "ldq $1, %3 \n\t" \
        "ldq $2, %4 \n\t" \
        "ldq $3, %5 \n\t" \
        "ldq $4, %6 \n\t"

#define MULADDC_CORE \
        "ldq $6, 0($1) \n\t" \
        "addq $1, 8, $1 \n\t" \
        "mulq $6, $4, $7 \n\t" \
        "umulh $6, $4, $6 \n\t" \
        "addq $7, $3, $7 \n\t" \
        "cmpult $7, $3, $3 \n\t" \
        "ldq $5, 0($2) \n\t" \
        "addq $7, $5, $7 \n\t" \
        "cmpult $7, $5, $5 \n\t" \
        "stq $7, 0($2) \n\t" \
        "addq $2, 8, $2 \n\t" \
        "addq $6, $3, $3 \n\t" \
        "addq $5, $3, $3 \n\t"

#define MULADDC_STOP \
        "stq $3, %0 \n\t" \
        "stq $2, %1 \n\t" \
        "stq $1, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "$1", "$2", "$3", "$4", "$5", "$6", "$7" \
    );
#endif /* Alpha */

#if defined(__mips__) && !defined(__mips64)

#define MULADDC_INIT \
    asm( \
        "lw $10, %3 \n\t" \
        "lw $11, %4 \n\t" \
        "lw $12, %5 \n\t" \
        "lw $13, %6 \n\t"

#define MULADDC_CORE \
        "lw $14, 0($10) \n\t" \
        "multu $13, $14 \n\t" \
        "addi $10, $10, 4 \n\t" \
        "mflo $14 \n\t" \
        "mfhi $9 \n\t" \
        "addu $14, $12, $14 \n\t" \
        "lw $15, 0($11) \n\t" \
        "sltu $12, $14, $12 \n\t" \
        "addu $15, $14, $15 \n\t" \
        "sltu $14, $15, $14 \n\t" \
        "addu $12, $12, $9 \n\t" \
        "sw $15, 0($11) \n\t" \
        "addu $12, $12, $14 \n\t" \
        "addi $11, $11, 4 \n\t"

#define MULADDC_STOP \
        "sw $12, %0 \n\t" \
        "sw $11, %1 \n\t" \
        "sw $10, %2 \n\t" \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "$9", "$10", "$11", "$12", "$13", "$14", "$15", "lo", "hi" \
    );

#endif /* MIPS */
#endif /* GNUC */

#if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__)

#define MULADDC_INIT \
    __asm mov esi, s \
    __asm mov edi, d \
    __asm mov ecx, c \
    __asm mov ebx, b

#define MULADDC_CORE \
    __asm lodsd \
    __asm mul ebx \
    __asm add eax, ecx \
    __asm adc edx, 0 \
    __asm add eax, [edi] \
    __asm adc edx, 0 \
    __asm mov ecx, edx \
    __asm stosd

#if defined(MBEDTLS_HAVE_SSE2)

#define EMIT __asm _emit

#define MULADDC_HUIT \
    EMIT 0x0F EMIT 0x6E EMIT 0xC9 \
    EMIT 0x0F EMIT 0x6E EMIT 0xC3 \
    EMIT 0x0F EMIT 0x6E EMIT 0x1F \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
    EMIT 0x0F EMIT 0x6E EMIT 0x16 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
    EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x04 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
    EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x08 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
    EMIT 0x0F EMIT 0x6E EMIT 0x7E EMIT 0x0C \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF8 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
    EMIT 0x0F EMIT 0x6E EMIT 0x5F EMIT 0x04 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xDC \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x08 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xEE \
    EMIT 0x0F EMIT 0x6E EMIT 0x67 EMIT 0x0C \
    EMIT 0x0F EMIT 0xD4 EMIT 0xFC \
    EMIT 0x0F EMIT 0x7E EMIT 0x0F \
    EMIT 0x0F EMIT 0x6E EMIT 0x56 EMIT 0x10 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x14 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
    EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x18 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x04 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0x6E EMIT 0x5E EMIT 0x1C \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD8 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCD \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x10 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xD5 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x08 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCF \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x14 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xE5 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x0C \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x18 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xF5 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x10 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCC \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x1C \
    EMIT 0x0F EMIT 0xD4 EMIT 0xDD \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x14 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCE \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x18 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x1C \
    EMIT 0x83 EMIT 0xC7 EMIT 0x20 \
    EMIT 0x83 EMIT 0xC6 EMIT 0x20 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0x7E EMIT 0xC9

#define MULADDC_STOP \
    EMIT 0x0F EMIT 0x77 \
    __asm mov c, ecx \
    __asm mov d, edi \
    __asm mov s, esi \

#else

#define MULADDC_STOP \
    __asm mov c, ecx \
    __asm mov d, edi \
    __asm mov s, esi \

#endif /* SSE2 */
#endif /* MSVC */

#endif /* MBEDTLS_HAVE_ASM */

#if !defined(MULADDC_CORE)
#if defined(MBEDTLS_HAVE_UDBL)

#define MULADDC_INIT \
{ \
    mbedtls_t_udbl r; \
    mbedtls_mpi_uint r0, r1;

#define MULADDC_CORE \
    r = *(s++) * (mbedtls_t_udbl) b; \
    r0 = (mbedtls_mpi_uint) r; \
    r1 = (mbedtls_mpi_uint)( r >> biL ); \
    r0 += c;  r1 += (r0 <  c); \
    r0 += *d; r1 += (r0 < *d); \
    c = r1; *(d++) = r0;

#define MULADDC_STOP \
}

#else
#define MULADDC_INIT \
{ \
    mbedtls_mpi_uint s0, s1, b0, b1; \
    mbedtls_mpi_uint r0, r1, rx, ry; \
    b0 = ( b << biH ) >> biH; \
    b1 = ( b >> biH );

#define MULADDC_CORE \
    s0 = ( *s << biH ) >> biH; \
    s1 = ( *s >> biH ); s++; \
    rx = s0 * b1; r0 = s0 * b0; \
    ry = s1 * b0; r1 = s1 * b1; \
    r1 += ( rx >> biH ); \
    r1 += ( ry >> biH ); \
    rx <<= biH; ry <<= biH; \
    r0 += rx; r1 += (r0 < rx); \
    r0 += ry; r1 += (r0 < ry); \
    r0 += c;  r1 += (r0 <  c); \
    r0 += *d; r1 += (r0 < *d); \
    c = r1; *(d++) = r0;

#define MULADDC_STOP \
}

#endif /* C (generic) */
#endif /* C (longlong) */
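
/*
 * Illustrative sketch (not part of the original header): the generic C
 * MULADDC_CORE above builds a double-width product from half-width pieces.
 * Writing s = s1*2^biH + s0 and b = b1*2^biH + b0 gives
 *
 *     s * b = (s1*b1) << biL + (s1*b0 + s0*b1) << biH + s0*b0,
 *
 * and the (r0 < x) comparisons recover the carries that wrapping additions
 * would otherwise lose. The helper below shows the same trick for a single
 * 32x32->64 multiplication using 16-bit halves; the function name is made up
 * and uint32_t is assumed to come from <stdint.h>.
 */
#if 0
static void mul32_by_halves( uint32_t s, uint32_t b,
                             uint32_t *hi, uint32_t *lo )
{
    uint32_t s0 = s & 0xFFFF, s1 = s >> 16;
    uint32_t b0 = b & 0xFFFF, b1 = b >> 16;
    uint32_t r0 = s0 * b0;            /* low partial product              */
    uint32_t r1 = s1 * b1;            /* high partial product             */
    uint32_t rx = s0 * b1, ry = s1 * b0;

    r1 += rx >> 16;                   /* high halves of the cross terms   */
    r1 += ry >> 16;
    rx <<= 16; ry <<= 16;             /* low halves, shifted into place   */
    r0 += rx; r1 += ( r0 < rx );      /* add, catching the carry          */
    r0 += ry; r1 += ( r0 < ry );

    *hi = r1; *lo = r0;
}
#endif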

#endif /* bn_mul.h */