/* This file was automatically imported with
   import_gcry.py. Please don't modify it */
/* mpih-mul.c  -  MPI helper functions
 * Copyright (C) 1994, 1996, 1998, 1999, 2000,
 *               2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *       Actually it's the same code with only minor changes in the
 *       way the data is stored; this is to support the abstraction
 *       of an optional secure memory allocation which may be used
 *       to avoid revealing of sensitive data due to paging etc.
 */

#include "mpi-internal.h"
#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
    do {                                               \
        if( (size) < KARATSUBA_THRESHOLD )             \
            mul_n_basecase (prodp, up, vp, size);      \
        else                                           \
            mul_n (prodp, up, vp, size, tspace);       \
    } while (0);
#define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
    do {                                           \
        if ((size) < KARATSUBA_THRESHOLD)          \
            _gcry_mpih_sqr_n_basecase (prodp, up, size);   \
        else                                       \
            _gcry_mpih_sqr_n (prodp, up, size, tspace);    \
    } while (0);
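/* Note (added for clarity): both helpers fall back to the quadratic base
 * case once the operand size drops below KARATSUBA_THRESHOLD, so every
 * recursion below bottoms out in mul_n_basecase or
 * _gcry_mpih_sqr_n_basecase.  */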
/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
 * both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
 * always stored.  Return the most significant limb.
 *
 * Argument constraints:
 * 1. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 *
 * Handle simple cases with traditional multiplication.
 *
 * This is the most critical code of multiplication.  All multiplies rely
 * on this, both small and huge.  Small ones arrive here immediately.  Huge
 * ones arrive here as this is the base case for Karatsuba's recursive
 * algorithm below.
 */
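/* Illustration (added; not part of the original comment): for SIZE = 2
 * the base case computes
 *   PROD = U*v[0] + (U*v[1])*B
 * i.e. one _gcry_mpih_mul_1 to seed the product, then one
 * _gcry_mpih_addmul_1 per remaining limb of V, shifted by one limb
 * (one factor of B = 2**BITS_PER_MP_LIMB) per iteration via prodp++.  */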
static mpi_limb_t
mul_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up,
                mpi_ptr_t vp, mpi_size_t size)
{
    mpi_size_t i;
    mpi_limb_t cy;
    mpi_limb_t v_limb;

    /* Multiply by the first limb in V separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing. */
    v_limb = vp[0];
    if( v_limb <= 1 ) {
        if( v_limb == 1 )
            MPN_COPY( prodp, up, size );
        else
            MPN_ZERO( prodp, size );
        cy = 0;
    }
    else
        cy = _gcry_mpih_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy;
    prodp++;

    /* For each iteration in the outer loop, multiply one limb from
     * U with one limb from V, and add it to PROD. */
    for( i = 1; i < size; i++ ) {
        v_limb = vp[i];
        if( v_limb <= 1 ) {
            cy = 0;
            if( v_limb == 1 )
                cy = _gcry_mpih_add_n(prodp, prodp, up, size);
        }
        else
            cy = _gcry_mpih_addmul_1(prodp, up, size, v_limb);

        prodp[size] = cy;
        prodp++;
    }

    return cy;
}
static void
mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
       mpi_size_t size, mpi_ptr_t tspace )
{
    if( size & 1 ) {
        /* The size is odd, and the code below doesn't handle that.
         * Multiply the least significant (size - 1) limbs with a recursive
         * call, and handle the most significant limb of S1 and S2
         * separately.
         * A slightly faster way to do this would be to make the Karatsuba
         * code below behave as if the size were even, and let it check for
         * odd size in the end.  I.e., in essence move this code to the end.
         * Doing so would save us a recursive call, and potentially make the
         * stack grow a lot less.
         */
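        /* Illustration (added): writing U = U' + u[esize]*B**esize and
         * V = V' + v[esize]*B**esize, with U' and V' of esize limbs,
         * the statements below compute
         *   U*V = U'*V' + U'*v[esize]*B**esize + V*u[esize]*B**esize.  */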
        mpi_size_t esize = size - 1;   /* even size */
        mpi_limb_t cy_limb;

        MPN_MUL_N_RECURSE( prodp, up, vp, esize, tspace );
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, vp[esize] );
        prodp[esize + esize] = cy_limb;
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, vp, size, up[esize] );
        prodp[esize + size] = cy_limb;
    }
    else {
        /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
         *
         * Split U in two pieces, U1 and U0, such that
         * U = U0 + U1*(B**n),
         * and V in V1 and V0, such that
         * V = V0 + V1*(B**n).
         *
         * UV is then computed recursively using the identity
         *
         *   UV = (B**(2*n) + B**n)*U1*V1 + B**n*(U1-U0)*(V0-V1)
         *        + (B**n + 1)*U0*V0
         *
         * Where B = 2**BITS_PER_MP_LIMB.
         */
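        /* Worked example (added for illustration; not in the original
         * comment): with B = 10 and n = 1, take U = 23 (U1 = 2, U0 = 3)
         * and V = 41 (V1 = 4, V0 = 1).  Then
         *   (B**2 + B)*U1*V1   = 110*8        = 880
         *   B*(U1-U0)*(V0-V1)  = 10*(-1)*(-3) =  30
         *   (B + 1)*U0*V0      = 11*3         =  33
         * and 880 + 30 + 33 = 943 = 23*41, computed with three half-size
         * multiplies (U1*V1, (U1-U0)*(V0-V1), U0*V0) instead of four.  */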
        mpi_size_t hsize = size >> 1;
        mpi_limb_t cy;
        int negflg;
        /* Product H.      ________________  ________________
         *                |_____U1 x V1____||____U0 x V0_____|
         * Put result in upper part of PROD and pass low part of TSPACE
         * as new TSPACE.
         */
        MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace);
        /* Product M.      ________________
         *                |_(U1-U0)(V0-V1)_|
         */
        if( _gcry_mpih_cmp(up + hsize, up, hsize) >= 0 ) {
            _gcry_mpih_sub_n(prodp, up + hsize, up, hsize);
            negflg = 0;
        }
        else {
            _gcry_mpih_sub_n(prodp, up, up + hsize, hsize);
            negflg = 1;
        }
        if( _gcry_mpih_cmp(vp + hsize, vp, hsize) >= 0 ) {
            _gcry_mpih_sub_n(prodp + hsize, vp + hsize, vp, hsize);
            negflg ^= 1;
        }
        else {
            _gcry_mpih_sub_n(prodp + hsize, vp, vp + hsize, hsize);
            /* No change of NEGFLG. */
        }
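        /* Note (added for clarity): PROD[0..hsize) now holds |U1-U0| and
         * PROD[hsize..size) holds |V1-V0|.  NEGFLG is set exactly when
         * U1-U0 and V1-V0 have the same sign, in which case product M
         * = B**n*(U1-U0)*(V0-V1) is non-positive and is subtracted
         * below rather than added.  */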
        /* Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE.
         */
        MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size);
        /* Add/copy product H. */
        MPN_COPY (prodp + hsize, prodp + size, hsize);
        cy = _gcry_mpih_add_n( prodp + size, prodp + size,
                               prodp + size + hsize, hsize);

        /* Add product M (if NEGFLG M is a negative number) */
        if( negflg )
            cy -= _gcry_mpih_sub_n(prodp + hsize, prodp + hsize, tspace, size);
        else
            cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);
        /* Product L.      ________________  ________________
         *                |________________||____U0 x V0_____|
         * Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE.
         */
        MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);

        /* Add/copy Product L (twice) */
        cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);
        if( cy )
            _gcry_mpih_add_1(prodp + hsize + size,
                             prodp + hsize + size, hsize, cy);

        MPN_COPY(prodp, tspace, hsize);
        cy = _gcry_mpih_add_n(prodp + hsize, prodp + hsize,
                              tspace + hsize, hsize);
        if( cy )
            _gcry_mpih_add_1(prodp + size, prodp + size, size, 1);
    }
}
void
_gcry_mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size )
{
    mpi_size_t i;
    mpi_limb_t cy_limb;
    mpi_limb_t v_limb;

    /* Multiply by the first limb in V separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing. */
    v_limb = up[0];
    if( v_limb <= 1 ) {
        if( v_limb == 1 )
            MPN_COPY( prodp, up, size );
        else
            MPN_ZERO(prodp, size);
        cy_limb = 0;
    }
    else
        cy_limb = _gcry_mpih_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy_limb;
    prodp++;

    /* For each iteration in the outer loop, multiply one limb from
     * U with one limb from V, and add it to PROD. */
    for( i = 1; i < size; i++ ) {
        v_limb = up[i];
        if( v_limb <= 1 ) {
            cy_limb = 0;
            if( v_limb == 1 )
                cy_limb = _gcry_mpih_add_n(prodp, prodp, up, size);
        }
        else
            cy_limb = _gcry_mpih_addmul_1(prodp, up, size, v_limb);

        prodp[size] = cy_limb;
        prodp++;
    }
}
void
_gcry_mpih_sqr_n( mpi_ptr_t prodp,
                  mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
{
    if( size & 1 ) {
        /* The size is odd, and the code below doesn't handle that.
         * Multiply the least significant (size - 1) limbs with a recursive
         * call, and handle the most significant limb of S1 and S2
         * separately.
         * A slightly faster way to do this would be to make the Karatsuba
         * code below behave as if the size were even, and let it check for
         * odd size in the end.  I.e., in essence move this code to the end.
         * Doing so would save us a recursive call, and potentially make the
         * stack grow a lot less.
         */
        mpi_size_t esize = size - 1;   /* even size */
        mpi_limb_t cy_limb;

        MPN_SQR_N_RECURSE( prodp, up, esize, tspace );
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, up[esize] );
        prodp[esize + esize] = cy_limb;
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, size, up[esize] );
        prodp[esize + size] = cy_limb;
    }
    else {
        mpi_size_t hsize = size >> 1;
        mpi_limb_t cy;

        /* Product H.      ________________  ________________
         *                |_____U1 x U1____||____U0 x U0_____|
         * Put result in upper part of PROD and pass low part of TSPACE
         * as new TSPACE.
         */
        MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);
        /* Product M.      ________________
         *                |_(U1-U0)(U0-U1)_|
         */
        if( _gcry_mpih_cmp( up + hsize, up, hsize) >= 0 )
            _gcry_mpih_sub_n( prodp, up + hsize, up, hsize);
        else
            _gcry_mpih_sub_n (prodp, up, up + hsize, hsize);
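        /* Note (added for clarity): for squaring, product M
         * = B**n*(U1-U0)*(U0-U1) = -B**n*(U1-U0)**2 is never positive,
         * so no sign flag is needed here; M is unconditionally
         * subtracted below.  */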
        /* Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE. */
        MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);
        /* Add/copy product H */
        MPN_COPY(prodp + hsize, prodp + size, hsize);
        cy = _gcry_mpih_add_n(prodp + size, prodp + size,
                              prodp + size + hsize, hsize);

        /* Add product M (if NEGFLG M is a negative number). */
        cy -= _gcry_mpih_sub_n (prodp + hsize, prodp + hsize, tspace, size);
        /* Product L.      ________________  ________________
         *                |________________||____U0 x U0_____|
         * Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE. */
        MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);

        /* Add/copy Product L (twice). */
        cy += _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace, size);
        if( cy )
            _gcry_mpih_add_1(prodp + hsize + size,
                             prodp + hsize + size, hsize, cy);

        MPN_COPY(prodp, tspace, hsize);
        cy = _gcry_mpih_add_n (prodp + hsize, prodp + hsize,
                               tspace + hsize, hsize);
        if( cy )
            _gcry_mpih_add_1 (prodp + size, prodp + size, size, 1);
    }
}
/* This should be made into an inline function in gmp.h. */
void
_gcry_mpih_mul_n( mpi_ptr_t prodp,
                  mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
{
    int secure;

    if( up == vp ) {
        if( size < KARATSUBA_THRESHOLD )
            _gcry_mpih_sqr_n_basecase( prodp, up, size );
        else {
            mpi_ptr_t tspace;
            secure = gcry_is_secure( up );
            tspace = mpi_alloc_limb_space( 2 * size, secure );
            _gcry_mpih_sqr_n( prodp, up, size, tspace );
            _gcry_mpi_free_limb_space (tspace, 2 * size );
        }
    }
    else {
        if( size < KARATSUBA_THRESHOLD )
            mul_n_basecase( prodp, up, vp, size );
        else {
            mpi_ptr_t tspace;
            secure = gcry_is_secure( up ) || gcry_is_secure( vp );
            tspace = mpi_alloc_limb_space( 2 * size, secure );
            mul_n (prodp, up, vp, size, tspace);
            _gcry_mpi_free_limb_space (tspace, 2 * size );
        }
    }
}
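/* Summary (added; not part of the original sources): multiply the number
 * at UP (USIZE limbs) by the number at VP (VSIZE limbs, VSIZE <= USIZE)
 * by walking U in VSIZE-limb chunks.  Each chunk is multiplied by V with
 * the Karatsuba recursion and accumulated into PRODP at the proper limb
 * offset; a final chunk shorter than VSIZE is handled by swapping the
 * operands and recursing (or by _gcry_mpih_mul for small sizes).  The
 * karatsuba_ctx structure caches the temporary limb space across calls,
 * honoring secure memory whenever either operand lives there.  */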
void
_gcry_mpih_mul_karatsuba_case( mpi_ptr_t prodp,
                               mpi_ptr_t up, mpi_size_t usize,
                               mpi_ptr_t vp, mpi_size_t vsize,
                               struct karatsuba_ctx *ctx )
{
    mpi_limb_t cy;

    if( !ctx->tspace || ctx->tspace_size < vsize ) {
        if( ctx->tspace )
            _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
        ctx->tspace_nlimbs = 2 * vsize;
        ctx->tspace = mpi_alloc_limb_space( 2 * vsize,
                                            (gcry_is_secure( up )
                                             || gcry_is_secure( vp )) );
        ctx->tspace_size = vsize;
    }

    MPN_MUL_N_RECURSE( prodp, up, vp, vsize, ctx->tspace );

    prodp += vsize;
    up += vsize;
    usize -= vsize;
    if( usize >= vsize ) {
        if( !ctx->tp || ctx->tp_size < vsize ) {
            if( ctx->tp )
                _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
            ctx->tp_nlimbs = 2 * vsize;
            ctx->tp = mpi_alloc_limb_space( 2 * vsize, gcry_is_secure( up )
                                                       || gcry_is_secure( vp ) );
            ctx->tp_size = vsize;
        }

        do {
            MPN_MUL_N_RECURSE( ctx->tp, up, vp, vsize, ctx->tspace );
            cy = _gcry_mpih_add_n( prodp, prodp, ctx->tp, vsize );
            _gcry_mpih_add_1( prodp + vsize, ctx->tp + vsize, vsize, cy );
            prodp += vsize;
            up += vsize;
            usize -= vsize;
        } while( usize >= vsize );
    }

    if( usize ) {
        if( usize < KARATSUBA_THRESHOLD ) {
            _gcry_mpih_mul( ctx->tspace, vp, vsize, up, usize );
        }
        else {
            if( !ctx->next )
                ctx->next = gcry_xcalloc( 1, sizeof *ctx );
            _gcry_mpih_mul_karatsuba_case( ctx->tspace,
                                           vp, vsize,
                                           up, usize,
                                           ctx->next );
        }

        cy = _gcry_mpih_add_n( prodp, prodp, ctx->tspace, vsize);
        _gcry_mpih_add_1( prodp + vsize, ctx->tspace + vsize, usize, cy );
    }
}
void
_gcry_mpih_release_karatsuba_ctx( struct karatsuba_ctx *ctx )
{
    struct karatsuba_ctx *ctx2;

    if( ctx->tp )
        _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
    if( ctx->tspace )
        _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
    for( ctx = ctx->next; ctx; ctx = ctx2 ) {
        ctx2 = ctx->next;
        if( ctx->tp )
            _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
        if( ctx->tspace )
            _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
        gcry_free( ctx );
    }
}
/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
 * and v (pointed to by VP, with VSIZE limbs), and store the result at
 * PRODP.  USIZE + VSIZE limbs are always stored, but the most
 * significant limb may be zero if the input operands are normalized.
 * Return the most significant limb of the result.
 *
 * NOTE: The space pointed to by PRODP is overwritten before finished
 * with U and V, so overlap is an error.
 *
 * Argument constraints:
 * 1. USIZE >= VSIZE.
 * 2. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 */
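/* Illustrative use (added; the operand values are hypothetical and do
 * not appear in the original file):
 *
 *   mpi_limb_t u[2] = { 5, 1 };   // U = 1*B + 5
 *   mpi_limb_t v[2] = { 7, 0 };   // V = 7
 *   mpi_limb_t prod[4];           // USIZE + VSIZE limbs
 *   mpi_limb_t msl = _gcry_mpih_mul( prod, u, 2, v, 2 );
 *   // prod now holds { 35, 7, 0, 0 }, i.e. 7*B + 35, and msl == 0.
 */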
mpi_limb_t
_gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
                mpi_ptr_t vp, mpi_size_t vsize)
{
    mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
    mpi_limb_t cy;
    struct karatsuba_ctx ctx;

    if( vsize < KARATSUBA_THRESHOLD ) {
        mpi_size_t i;
        mpi_limb_t v_limb;

        if( !vsize )
            return 0;

        /* Multiply by the first limb in V separately, as the result can be
         * stored (not added) to PROD.  We also avoid a loop for zeroing. */
        v_limb = vp[0];
        if( v_limb <= 1 ) {
            if( v_limb == 1 )
                MPN_COPY( prodp, up, usize );
            else
                MPN_ZERO( prodp, usize );
            cy = 0;
        }
        else
            cy = _gcry_mpih_mul_1( prodp, up, usize, v_limb );

        prodp[usize] = cy;
        prodp++;

        /* For each iteration in the outer loop, multiply one limb from
         * U with one limb from V, and add it to PROD. */
        for( i = 1; i < vsize; i++ ) {
            v_limb = vp[i];
            if( v_limb <= 1 ) {
                cy = 0;
                if( v_limb == 1 )
                    cy = _gcry_mpih_add_n(prodp, prodp, up, usize);
            }
            else
                cy = _gcry_mpih_addmul_1(prodp, up, usize, v_limb);

            prodp[usize] = cy;
            prodp++;
        }

        return cy;
    }

    memset( &ctx, 0, sizeof ctx );
    _gcry_mpih_mul_karatsuba_case( prodp, up, usize, vp, vsize, &ctx );
    _gcry_mpih_release_karatsuba_ctx( &ctx );
    return *prod_endp;
}