2 Long (arbitrary precision) integer object implementation.
4 Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials are licensed and made available under
6 the terms and conditions of the BSD License that accompanies this distribution.
7 The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14 /* XXX The functional organization of this file is terrible */
17 #include "longintrepr.h"
18 #include "structseq.h"
24 /* For long multiplication, use the O(N**2) school algorithm unless
25 * both operands contain more than KARATSUBA_CUTOFF digits (this
26 * being an internal Python long digit, in base PyLong_BASE).
28 #define KARATSUBA_CUTOFF 70
29 #define KARATSUBA_SQUARE_CUTOFF (2 * KARATSUBA_CUTOFF)
31 /* For exponentiation, use the binary left-to-right algorithm
32 * unless the exponent contains more than FIVEARY_CUTOFF digits.
33 * In that case, do 5 bits at a time. The potential drawback is that
34 * a table of 2**5 intermediate results is computed.
36 #define FIVEARY_CUTOFF 8
39 #define ABS(x) ((x) < 0 ? -(x) : (x))
43 #define MAX(x, y) ((x) < (y) ? (y) : (x))
47 #define MIN(x, y) ((x) > (y) ? (y) : (x))
50 #define SIGCHECK(PyTryBlock) \
52 if (--_Py_Ticker < 0) { \
53 _Py_Ticker = _Py_CheckInterval; \
54 if (PyErr_CheckSignals()) PyTryBlock \
58 /* Normalize (remove leading zeros from) a long int object.
59 Doesn't attempt to free the storage--in most cases, due to the nature
60 of the algorithms used, this could save at most be one word anyway. */
63 long_normalize(register PyLongObject
*v
)
65 Py_ssize_t j
= ABS(Py_SIZE(v
));
68 while (i
> 0 && v
->ob_digit
[i
-1] == 0)
71 Py_SIZE(v
) = (Py_SIZE(v
) < 0) ? -(i
) : i
;
75 /* Allocate a new long int object with size digits.
76 Return NULL and set exception if we run out of memory. */
78 #define MAX_LONG_DIGITS \
79 ((PY_SSIZE_T_MAX - offsetof(PyLongObject, ob_digit))/sizeof(digit))
82 _PyLong_New(Py_ssize_t size
)
84 if (size
> (Py_ssize_t
)MAX_LONG_DIGITS
) {
85 PyErr_SetString(PyExc_OverflowError
,
86 "too many digits in integer");
89 /* coverity[ampersand_in_size] */
90 /* XXX(nnorwitz): PyObject_NEW_VAR / _PyObject_VAR_SIZE need to detect
92 return PyObject_NEW_VAR(PyLongObject
, &PyLong_Type
, size
);
96 _PyLong_Copy(PyLongObject
*src
)
105 result
= _PyLong_New(i
);
106 if (result
!= NULL
) {
107 result
->ob_size
= src
->ob_size
;
109 result
->ob_digit
[i
] = src
->ob_digit
[i
];
111 return (PyObject
*)result
;
114 /* Create a new long int object from a C long int */
117 PyLong_FromLong(long ival
)
120 unsigned long abs_ival
;
121 unsigned long t
; /* unsigned so >> doesn't propagate sign bit */
126 /* if LONG_MIN == -LONG_MAX-1 (true on most platforms) then
127 ANSI C says that the result of -ival is undefined when ival
128 == LONG_MIN. Hence the following workaround. */
129 abs_ival
= (unsigned long)(-1-ival
) + 1;
133 abs_ival
= (unsigned long)ival
;
136 /* Count the number of Python digits.
137 We used to pick 5 ("big enough for anything"), but that's a
138 waste of time and space given that 5*15 = 75 bits are rarely
145 v
= _PyLong_New(ndigits
);
147 digit
*p
= v
->ob_digit
;
148 v
->ob_size
= negative
? -ndigits
: ndigits
;
151 *p
++ = (digit
)(t
& PyLong_MASK
);
155 return (PyObject
*)v
;
158 /* Create a new long int object from a C unsigned long int */
161 PyLong_FromUnsignedLong(unsigned long ival
)
167 /* Count the number of Python digits. */
168 t
= (unsigned long)ival
;
173 v
= _PyLong_New(ndigits
);
175 digit
*p
= v
->ob_digit
;
176 Py_SIZE(v
) = ndigits
;
178 *p
++ = (digit
)(ival
& PyLong_MASK
);
179 ival
>>= PyLong_SHIFT
;
182 return (PyObject
*)v
;
185 /* Create a new long int object from a C double */
188 PyLong_FromDouble(double dval
)
192 int i
, ndig
, expo
, neg
;
194 if (Py_IS_INFINITY(dval
)) {
195 PyErr_SetString(PyExc_OverflowError
,
196 "cannot convert float infinity to integer");
199 if (Py_IS_NAN(dval
)) {
200 PyErr_SetString(PyExc_ValueError
,
201 "cannot convert float NaN to integer");
208 frac
= frexp(dval
, &expo
); /* dval = frac*2**expo; 0.0 <= frac < 1.0 */
210 return PyLong_FromLong(0L);
211 ndig
= (expo
-1) / PyLong_SHIFT
+ 1; /* Number of 'digits' in result */
212 v
= _PyLong_New(ndig
);
215 frac
= ldexp(frac
, (expo
-1) % PyLong_SHIFT
+ 1);
216 for (i
= ndig
; --i
>= 0; ) {
217 digit bits
= (digit
)frac
;
218 v
->ob_digit
[i
] = bits
;
219 frac
= frac
- (double)bits
;
220 frac
= ldexp(frac
, PyLong_SHIFT
);
223 Py_SIZE(v
) = -(Py_SIZE(v
));
224 return (PyObject
*)v
;
227 /* Checking for overflow in PyLong_AsLong is a PITA since C doesn't define
228 * anything about what happens when a signed integer operation overflows,
229 * and some compilers think they're doing you a favor by being "clever"
230 * then. The bit pattern for the largest postive signed long is
231 * (unsigned long)LONG_MAX, and for the smallest negative signed long
232 * it is abs(LONG_MIN), which we could write -(unsigned long)LONG_MIN.
233 * However, some other compilers warn about applying unary minus to an
234 * unsigned operand. Hence the weird "0-".
236 #define PY_ABS_LONG_MIN (0-(unsigned long)LONG_MIN)
237 #define PY_ABS_SSIZE_T_MIN (0-(size_t)PY_SSIZE_T_MIN)
239 /* Get a C long int from a Python long or Python int object.
240 On overflow, returns -1 and sets *overflow to 1 or -1 depending
241 on the sign of the result. Otherwise *overflow is 0.
243 For other errors (e.g., type error), returns -1 and sets an error
248 PyLong_AsLongAndOverflow(PyObject
*vv
, int *overflow
)
250 /* This version by Tim Peters */
251 register PyLongObject
*v
;
252 unsigned long x
, prev
;
256 int do_decref
= 0; /* if nb_int was called */
260 PyErr_BadInternalCall();
265 return PyInt_AsLong(vv
);
267 if (!PyLong_Check(vv
)) {
269 nb
= vv
->ob_type
->tp_as_number
;
270 if (nb
== NULL
|| nb
->nb_int
== NULL
) {
271 PyErr_SetString(PyExc_TypeError
,
272 "an integer is required");
275 vv
= (*nb
->nb_int
) (vv
);
279 if(PyInt_Check(vv
)) {
280 res
= PyInt_AsLong(vv
);
283 if (!PyLong_Check(vv
)) {
285 PyErr_SetString(PyExc_TypeError
,
286 "nb_int should return int object");
292 v
= (PyLongObject
*)vv
;
297 res
= -(sdigit
)v
->ob_digit
[0];
303 res
= v
->ob_digit
[0];
314 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
315 if ((x
>> PyLong_SHIFT
) != prev
) {
320 /* Haven't lost any bits, but casting to long requires extra
321 * care (see comment above).
323 if (x
<= (unsigned long)LONG_MAX
) {
324 res
= (long)x
* sign
;
326 else if (sign
< 0 && x
== PY_ABS_LONG_MIN
) {
331 /* res is already set to -1 */
341 /* Get a C long int from a long int object.
342 Returns -1 and sets an error condition if overflow occurs. */
345 PyLong_AsLong(PyObject
*obj
)
348 long result
= PyLong_AsLongAndOverflow(obj
, &overflow
);
350 /* XXX: could be cute and give a different
351 message for overflow == -1 */
352 PyErr_SetString(PyExc_OverflowError
,
353 "Python int too large to convert to C long");
358 /* Get a Py_ssize_t from a long int object.
359 Returns -1 and sets an error condition if overflow occurs. */
362 PyLong_AsSsize_t(PyObject
*vv
) {
363 register PyLongObject
*v
;
368 if (vv
== NULL
|| !PyLong_Check(vv
)) {
369 PyErr_BadInternalCall();
372 v
= (PyLongObject
*)vv
;
382 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
383 if ((x
>> PyLong_SHIFT
) != prev
)
386 /* Haven't lost any bits, but casting to a signed type requires
387 * extra care (see comment above).
389 if (x
<= (size_t)PY_SSIZE_T_MAX
) {
390 return (Py_ssize_t
)x
* sign
;
392 else if (sign
< 0 && x
== PY_ABS_SSIZE_T_MIN
) {
393 return PY_SSIZE_T_MIN
;
398 PyErr_SetString(PyExc_OverflowError
,
399 "long int too large to convert to int");
403 /* Get a C unsigned long int from a long int object.
404 Returns -1 and sets an error condition if overflow occurs. */
407 PyLong_AsUnsignedLong(PyObject
*vv
)
409 register PyLongObject
*v
;
410 unsigned long x
, prev
;
413 if (vv
== NULL
|| !PyLong_Check(vv
)) {
414 if (vv
!= NULL
&& PyInt_Check(vv
)) {
415 long val
= PyInt_AsLong(vv
);
417 PyErr_SetString(PyExc_OverflowError
,
418 "can't convert negative value "
420 return (unsigned long) -1;
424 PyErr_BadInternalCall();
425 return (unsigned long) -1;
427 v
= (PyLongObject
*)vv
;
431 PyErr_SetString(PyExc_OverflowError
,
432 "can't convert negative value to unsigned long");
433 return (unsigned long) -1;
437 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
438 if ((x
>> PyLong_SHIFT
) != prev
) {
439 PyErr_SetString(PyExc_OverflowError
,
440 "long int too large to convert");
441 return (unsigned long) -1;
447 /* Get a C unsigned long int from a long int object, ignoring the high bits.
448 Returns -1 and sets an error condition if an error occurs. */
451 PyLong_AsUnsignedLongMask(PyObject
*vv
)
453 register PyLongObject
*v
;
458 if (vv
== NULL
|| !PyLong_Check(vv
)) {
459 if (vv
!= NULL
&& PyInt_Check(vv
))
460 return PyInt_AsUnsignedLongMask(vv
);
461 PyErr_BadInternalCall();
462 return (unsigned long) -1;
464 v
= (PyLongObject
*)vv
;
473 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
479 _PyLong_Sign(PyObject
*vv
)
481 PyLongObject
*v
= (PyLongObject
*)vv
;
484 assert(PyLong_Check(v
));
486 return Py_SIZE(v
) == 0 ? 0 : (Py_SIZE(v
) < 0 ? -1 : 1);
490 _PyLong_NumBits(PyObject
*vv
)
492 PyLongObject
*v
= (PyLongObject
*)vv
;
497 assert(PyLong_Check(v
));
498 ndigits
= ABS(Py_SIZE(v
));
499 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
501 digit msd
= v
->ob_digit
[ndigits
- 1];
503 result
= (ndigits
- 1) * PyLong_SHIFT
;
504 if (result
/ PyLong_SHIFT
!= (size_t)(ndigits
- 1))
516 PyErr_SetString(PyExc_OverflowError
, "long has too many bits "
517 "to express in a platform size_t");
522 _PyLong_FromByteArray(const unsigned char* bytes
, size_t n
,
523 int little_endian
, int is_signed
)
525 const unsigned char* pstartbyte
; /* LSB of bytes */
526 int incr
; /* direction to move pstartbyte */
527 const unsigned char* pendbyte
; /* MSB of bytes */
528 size_t numsignificantbytes
; /* number of bytes that matter */
529 Py_ssize_t ndigits
; /* number of Python long digits */
530 PyLongObject
* v
; /* result */
531 Py_ssize_t idigit
= 0; /* next free index in v->ob_digit */
534 return PyLong_FromLong(0L);
538 pendbyte
= bytes
+ n
- 1;
542 pstartbyte
= bytes
+ n
- 1;
548 is_signed
= *pendbyte
>= 0x80;
550 /* Compute numsignificantbytes. This consists of finding the most
551 significant byte. Leading 0 bytes are insignificant if the number
552 is positive, and leading 0xff bytes if negative. */
555 const unsigned char* p
= pendbyte
;
556 const int pincr
= -incr
; /* search MSB to LSB */
557 const unsigned char insignficant
= is_signed
? 0xff : 0x00;
559 for (i
= 0; i
< n
; ++i
, p
+= pincr
) {
560 if (*p
!= insignficant
)
563 numsignificantbytes
= n
- i
;
564 /* 2's-comp is a bit tricky here, e.g. 0xff00 == -0x0100, so
565 actually has 2 significant bytes. OTOH, 0xff0001 ==
566 -0x00ffff, so we wouldn't *need* to bump it there; but we
567 do for 0xffff = -0x0001. To be safe without bothering to
568 check every case, bump it regardless. */
569 if (is_signed
&& numsignificantbytes
< n
)
570 ++numsignificantbytes
;
573 /* How many Python long digits do we need? We have
574 8*numsignificantbytes bits, and each Python long digit has
575 PyLong_SHIFT bits, so it's the ceiling of the quotient. */
576 /* catch overflow before it happens */
577 if (numsignificantbytes
> (PY_SSIZE_T_MAX
- PyLong_SHIFT
) / 8) {
578 PyErr_SetString(PyExc_OverflowError
,
579 "byte array too long to convert to int");
582 ndigits
= (numsignificantbytes
* 8 + PyLong_SHIFT
- 1) / PyLong_SHIFT
;
583 v
= _PyLong_New(ndigits
);
587 /* Copy the bits over. The tricky parts are computing 2's-comp on
588 the fly for signed numbers, and dealing with the mismatch between
589 8-bit bytes and (probably) 15-bit Python digits.*/
592 twodigits carry
= 1; /* for 2's-comp calculation */
593 twodigits accum
= 0; /* sliding register */
594 unsigned int accumbits
= 0; /* number of bits in accum */
595 const unsigned char* p
= pstartbyte
;
597 for (i
= 0; i
< numsignificantbytes
; ++i
, p
+= incr
) {
598 twodigits thisbyte
= *p
;
599 /* Compute correction for 2's comp, if needed. */
601 thisbyte
= (0xff ^ thisbyte
) + carry
;
602 carry
= thisbyte
>> 8;
605 /* Because we're going LSB to MSB, thisbyte is
606 more significant than what's already in accum,
607 so needs to be prepended to accum. */
608 accum
|= (twodigits
)thisbyte
<< accumbits
;
610 if (accumbits
>= PyLong_SHIFT
) {
611 /* There's enough to fill a Python digit. */
612 assert(idigit
< ndigits
);
613 v
->ob_digit
[idigit
] = (digit
)(accum
& PyLong_MASK
);
615 accum
>>= PyLong_SHIFT
;
616 accumbits
-= PyLong_SHIFT
;
617 assert(accumbits
< PyLong_SHIFT
);
620 assert(accumbits
< PyLong_SHIFT
);
622 assert(idigit
< ndigits
);
623 v
->ob_digit
[idigit
] = (digit
)accum
;
628 Py_SIZE(v
) = is_signed
? -idigit
: idigit
;
629 return (PyObject
*)long_normalize(v
);
633 _PyLong_AsByteArray(PyLongObject
* v
,
634 unsigned char* bytes
, size_t n
,
635 int little_endian
, int is_signed
)
637 Py_ssize_t i
; /* index into v->ob_digit */
638 Py_ssize_t ndigits
; /* |v->ob_size| */
639 twodigits accum
; /* sliding register */
640 unsigned int accumbits
; /* # bits in accum */
641 int do_twos_comp
; /* store 2's-comp? is_signed and v < 0 */
642 digit carry
; /* for computing 2's-comp */
643 size_t j
; /* # bytes filled */
644 unsigned char* p
; /* pointer to next byte in bytes */
645 int pincr
; /* direction to move p */
647 assert(v
!= NULL
&& PyLong_Check(v
));
649 if (Py_SIZE(v
) < 0) {
650 ndigits
= -(Py_SIZE(v
));
652 PyErr_SetString(PyExc_OverflowError
,
653 "can't convert negative long to unsigned");
659 ndigits
= Py_SIZE(v
);
672 /* Copy over all the Python digits.
673 It's crucial that every Python digit except for the MSD contribute
674 exactly PyLong_SHIFT bits to the total, so first assert that the long is
676 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
680 carry
= do_twos_comp
? 1 : 0;
681 for (i
= 0; i
< ndigits
; ++i
) {
682 digit thisdigit
= v
->ob_digit
[i
];
684 thisdigit
= (thisdigit
^ PyLong_MASK
) + carry
;
685 carry
= thisdigit
>> PyLong_SHIFT
;
686 thisdigit
&= PyLong_MASK
;
688 /* Because we're going LSB to MSB, thisdigit is more
689 significant than what's already in accum, so needs to be
690 prepended to accum. */
691 accum
|= (twodigits
)thisdigit
<< accumbits
;
693 /* The most-significant digit may be (probably is) at least
695 if (i
== ndigits
- 1) {
696 /* Count # of sign bits -- they needn't be stored,
697 * although for signed conversion we need later to
698 * make sure at least one sign bit gets stored. */
699 digit s
= do_twos_comp
? thisdigit
^ PyLong_MASK
: thisdigit
;
706 accumbits
+= PyLong_SHIFT
;
708 /* Store as many bytes as possible. */
709 while (accumbits
>= 8) {
713 *p
= (unsigned char)(accum
& 0xff);
720 /* Store the straggler (if any). */
721 assert(accumbits
< 8);
722 assert(carry
== 0); /* else do_twos_comp and *every* digit was 0 */
728 /* Fill leading bits of the byte with sign bits
729 (appropriately pretending that the long had an
730 infinite supply of sign bits). */
731 accum
|= (~(twodigits
)0) << accumbits
;
733 *p
= (unsigned char)(accum
& 0xff);
736 else if (j
== n
&& n
> 0 && is_signed
) {
737 /* The main loop filled the byte array exactly, so the code
738 just above didn't get to ensure there's a sign bit, and the
739 loop below wouldn't add one either. Make sure a sign bit
741 unsigned char msb
= *(p
- pincr
);
742 int sign_bit_set
= msb
>= 0x80;
743 assert(accumbits
== 0);
744 if (sign_bit_set
== do_twos_comp
)
750 /* Fill remaining bytes with copies of the sign bit. */
752 unsigned char signbyte
= do_twos_comp
? 0xffU
: 0U;
753 for ( ; j
< n
; ++j
, p
+= pincr
)
760 PyErr_SetString(PyExc_OverflowError
, "long too big to convert");
765 /* Create a new long (or int) object from a C pointer */
768 PyLong_FromVoidPtr(void *p
)
770 #if SIZEOF_VOID_P <= SIZEOF_LONG
772 return PyLong_FromUnsignedLong((unsigned long)p
);
773 return PyInt_FromLong((long)p
);
776 #ifndef HAVE_LONG_LONG
777 # error "PyLong_FromVoidPtr: sizeof(void*) > sizeof(long), but no long long"
779 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
780 # error "PyLong_FromVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
782 /* optimize null pointers */
784 return PyInt_FromLong(0);
785 return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG
)p
);
787 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
790 /* Get a C pointer from a long object (or an int object in some cases) */
793 PyLong_AsVoidPtr(PyObject
*vv
)
795 /* This function will allow int or long objects. If vv is neither,
796 then the PyLong_AsLong*() functions will raise the exception:
797 PyExc_SystemError, "bad argument to internal function"
799 #if SIZEOF_VOID_P <= SIZEOF_LONG
803 x
= PyInt_AS_LONG(vv
);
804 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
805 x
= PyLong_AsLong(vv
);
807 x
= PyLong_AsUnsignedLong(vv
);
810 #ifndef HAVE_LONG_LONG
811 # error "PyLong_AsVoidPtr: sizeof(void*) > sizeof(long), but no long long"
813 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
814 # error "PyLong_AsVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
819 x
= PyInt_AS_LONG(vv
);
820 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
821 x
= PyLong_AsLongLong(vv
);
823 x
= PyLong_AsUnsignedLongLong(vv
);
825 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
827 if (x
== -1 && PyErr_Occurred())
832 #ifdef HAVE_LONG_LONG
834 /* Initial PY_LONG_LONG support by Chris Herborth (chrish@qnx.com), later
835 * rewritten to use the newer PyLong_{As,From}ByteArray API.
838 #define IS_LITTLE_ENDIAN (int)*(unsigned char*)&one
839 #define PY_ABS_LLONG_MIN (0-(unsigned PY_LONG_LONG)PY_LLONG_MIN)
841 /* Create a new long int object from a C PY_LONG_LONG int. */
844 PyLong_FromLongLong(PY_LONG_LONG ival
)
847 unsigned PY_LONG_LONG abs_ival
;
848 unsigned PY_LONG_LONG t
; /* unsigned so >> doesn't propagate sign bit */
853 /* avoid signed overflow on negation; see comments
854 in PyLong_FromLong above. */
855 abs_ival
= (unsigned PY_LONG_LONG
)(-1-ival
) + 1;
859 abs_ival
= (unsigned PY_LONG_LONG
)ival
;
862 /* Count the number of Python digits.
863 We used to pick 5 ("big enough for anything"), but that's a
864 waste of time and space given that 5*15 = 75 bits are rarely
871 v
= _PyLong_New(ndigits
);
873 digit
*p
= v
->ob_digit
;
874 Py_SIZE(v
) = negative
? -ndigits
: ndigits
;
877 *p
++ = (digit
)(t
& PyLong_MASK
);
881 return (PyObject
*)v
;
884 /* Create a new long int object from a C unsigned PY_LONG_LONG int. */
887 PyLong_FromUnsignedLongLong(unsigned PY_LONG_LONG ival
)
890 unsigned PY_LONG_LONG t
;
893 /* Count the number of Python digits. */
894 t
= (unsigned PY_LONG_LONG
)ival
;
899 v
= _PyLong_New(ndigits
);
901 digit
*p
= v
->ob_digit
;
902 Py_SIZE(v
) = ndigits
;
904 *p
++ = (digit
)(ival
& PyLong_MASK
);
905 ival
>>= PyLong_SHIFT
;
908 return (PyObject
*)v
;
911 /* Create a new long int object from a C Py_ssize_t. */
914 PyLong_FromSsize_t(Py_ssize_t ival
)
916 Py_ssize_t bytes
= ival
;
918 return _PyLong_FromByteArray((unsigned char *)&bytes
,
919 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 1);
922 /* Create a new long int object from a C size_t. */
925 PyLong_FromSize_t(size_t ival
)
929 return _PyLong_FromByteArray((unsigned char *)&bytes
,
930 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 0);
933 /* Get a C PY_LONG_LONG int from a long int object.
934 Return -1 and set an error if overflow occurs. */
937 PyLong_AsLongLong(PyObject
*vv
)
944 PyErr_BadInternalCall();
947 if (!PyLong_Check(vv
)) {
951 return (PY_LONG_LONG
)PyInt_AsLong(vv
);
952 if ((nb
= vv
->ob_type
->tp_as_number
) == NULL
||
953 nb
->nb_int
== NULL
) {
954 PyErr_SetString(PyExc_TypeError
, "an integer is required");
957 io
= (*nb
->nb_int
) (vv
);
960 if (PyInt_Check(io
)) {
961 bytes
= PyInt_AsLong(io
);
965 if (PyLong_Check(io
)) {
966 bytes
= PyLong_AsLongLong(io
);
971 PyErr_SetString(PyExc_TypeError
, "integer conversion failed");
975 res
= _PyLong_AsByteArray((PyLongObject
*)vv
, (unsigned char *)&bytes
,
976 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 1);
978 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
980 return (PY_LONG_LONG
)-1;
985 /* Get a C unsigned PY_LONG_LONG int from a long int object.
986 Return -1 and set an error if overflow occurs. */
988 unsigned PY_LONG_LONG
989 PyLong_AsUnsignedLongLong(PyObject
*vv
)
991 unsigned PY_LONG_LONG bytes
;
995 if (vv
== NULL
|| !PyLong_Check(vv
)) {
996 PyErr_BadInternalCall();
997 return (unsigned PY_LONG_LONG
)-1;
1000 res
= _PyLong_AsByteArray((PyLongObject
*)vv
, (unsigned char *)&bytes
,
1001 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 0);
1003 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
1005 return (unsigned PY_LONG_LONG
)res
;
1010 /* Get a C unsigned long int from a long int object, ignoring the high bits.
1011 Returns -1 and sets an error condition if an error occurs. */
1013 unsigned PY_LONG_LONG
1014 PyLong_AsUnsignedLongLongMask(PyObject
*vv
)
1016 register PyLongObject
*v
;
1017 unsigned PY_LONG_LONG x
;
1021 if (vv
== NULL
|| !PyLong_Check(vv
)) {
1022 PyErr_BadInternalCall();
1023 return (unsigned long) -1;
1025 v
= (PyLongObject
*)vv
;
1034 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
1039 /* Get a C long long int from a Python long or Python int object.
1040 On overflow, returns -1 and sets *overflow to 1 or -1 depending
1041 on the sign of the result. Otherwise *overflow is 0.
1043 For other errors (e.g., type error), returns -1 and sets an error
1048 PyLong_AsLongLongAndOverflow(PyObject
*vv
, int *overflow
)
1050 /* This version by Tim Peters */
1051 register PyLongObject
*v
;
1052 unsigned PY_LONG_LONG x
, prev
;
1056 int do_decref
= 0; /* if nb_int was called */
1060 PyErr_BadInternalCall();
1064 if (PyInt_Check(vv
))
1065 return PyInt_AsLong(vv
);
1067 if (!PyLong_Check(vv
)) {
1068 PyNumberMethods
*nb
;
1069 nb
= vv
->ob_type
->tp_as_number
;
1070 if (nb
== NULL
|| nb
->nb_int
== NULL
) {
1071 PyErr_SetString(PyExc_TypeError
,
1072 "an integer is required");
1075 vv
= (*nb
->nb_int
) (vv
);
1079 if(PyInt_Check(vv
)) {
1080 res
= PyInt_AsLong(vv
);
1083 if (!PyLong_Check(vv
)) {
1085 PyErr_SetString(PyExc_TypeError
,
1086 "nb_int should return int object");
1092 v
= (PyLongObject
*)vv
;
1097 res
= -(sdigit
)v
->ob_digit
[0];
1103 res
= v
->ob_digit
[0];
1114 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
1115 if ((x
>> PyLong_SHIFT
) != prev
) {
1120 /* Haven't lost any bits, but casting to long requires extra
1121 * care (see comment above).
1123 if (x
<= (unsigned PY_LONG_LONG
)PY_LLONG_MAX
) {
1124 res
= (PY_LONG_LONG
)x
* sign
;
1126 else if (sign
< 0 && x
== PY_ABS_LLONG_MIN
) {
1131 /* res is already set to -1 */
1141 #undef IS_LITTLE_ENDIAN
1143 #endif /* HAVE_LONG_LONG */
1147 convert_binop(PyObject
*v
, PyObject
*w
, PyLongObject
**a
, PyLongObject
**b
) {
1148 if (PyLong_Check(v
)) {
1149 *a
= (PyLongObject
*) v
;
1152 else if (PyInt_Check(v
)) {
1153 *a
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(v
));
1158 if (PyLong_Check(w
)) {
1159 *b
= (PyLongObject
*) w
;
1162 else if (PyInt_Check(w
)) {
1163 *b
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(w
));
1172 #define CONVERT_BINOP(v, w, a, b) \
1174 if (!convert_binop(v, w, a, b)) { \
1175 Py_INCREF(Py_NotImplemented); \
1176 return Py_NotImplemented; \
1180 /* bits_in_digit(d) returns the unique integer k such that 2**(k-1) <= d <
1181 2**k if d is nonzero, else 0. */
1183 static const unsigned char BitLengthTable
[32] = {
1184 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
1185 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
1189 bits_in_digit(digit d
)
1196 d_bits
+= (int)BitLengthTable
[d
];
1200 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1201 * is modified in place, by adding y to it. Carries are propagated as far as
1202 * x[m-1], and the remaining carry (0 or 1) is returned.
1205 v_iadd(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1211 for (i
= 0; i
< n
; ++i
) {
1212 carry
+= x
[i
] + y
[i
];
1213 x
[i
] = carry
& PyLong_MASK
;
1214 carry
>>= PyLong_SHIFT
;
1215 assert((carry
& 1) == carry
);
1217 for (; carry
&& i
< m
; ++i
) {
1219 x
[i
] = carry
& PyLong_MASK
;
1220 carry
>>= PyLong_SHIFT
;
1221 assert((carry
& 1) == carry
);
1226 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1227 * is modified in place, by subtracting y from it. Borrows are propagated as
1228 * far as x[m-1], and the remaining borrow (0 or 1) is returned.
1231 v_isub(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1237 for (i
= 0; i
< n
; ++i
) {
1238 borrow
= x
[i
] - y
[i
] - borrow
;
1239 x
[i
] = borrow
& PyLong_MASK
;
1240 borrow
>>= PyLong_SHIFT
;
1241 borrow
&= 1; /* keep only 1 sign bit */
1243 for (; borrow
&& i
< m
; ++i
) {
1244 borrow
= x
[i
] - borrow
;
1245 x
[i
] = borrow
& PyLong_MASK
;
1246 borrow
>>= PyLong_SHIFT
;
1252 /* Shift digit vector a[0:m] d bits left, with 0 <= d < PyLong_SHIFT. Put
1253 * result in z[0:m], and return the d bits shifted out of the top.
1256 v_lshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1261 assert(0 <= d
&& d
< PyLong_SHIFT
);
1262 for (i
=0; i
< m
; i
++) {
1263 twodigits acc
= (twodigits
)a
[i
] << d
| carry
;
1264 z
[i
] = (digit
)acc
& PyLong_MASK
;
1265 carry
= (digit
)(acc
>> PyLong_SHIFT
);
1270 /* Shift digit vector a[0:m] d bits right, with 0 <= d < PyLong_SHIFT. Put
1271 * result in z[0:m], and return the d bits shifted out of the bottom.
1274 v_rshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1278 digit mask
= ((digit
)1 << d
) - 1U;
1280 assert(0 <= d
&& d
< PyLong_SHIFT
);
1281 for (i
=m
; i
-- > 0;) {
1282 twodigits acc
= (twodigits
)carry
<< PyLong_SHIFT
| a
[i
];
1283 carry
= (digit
)acc
& mask
;
1284 z
[i
] = (digit
)(acc
>> d
);
1289 /* Divide long pin, w/ size digits, by non-zero digit n, storing quotient
1290 in pout, and returning the remainder. pin and pout point at the LSD.
1291 It's OK for pin == pout on entry, which saves oodles of mallocs/frees in
1292 _PyLong_Format, but that should be done with great care since longs are
1296 inplace_divrem1(digit
*pout
, digit
*pin
, Py_ssize_t size
, digit n
)
1300 assert(n
> 0 && n
<= PyLong_MASK
);
1303 while (--size
>= 0) {
1305 rem
= (rem
<< PyLong_SHIFT
) | *--pin
;
1306 *--pout
= hi
= (digit
)(rem
/ n
);
1307 rem
-= (twodigits
)hi
* n
;
1312 /* Divide a long integer by a digit, returning both the quotient
1313 (as function result) and the remainder (through *prem).
1314 The sign of a is ignored; n should not be zero. */
1316 static PyLongObject
*
1317 divrem1(PyLongObject
*a
, digit n
, digit
*prem
)
1319 const Py_ssize_t size
= ABS(Py_SIZE(a
));
1322 assert(n
> 0 && n
<= PyLong_MASK
);
1323 z
= _PyLong_New(size
);
1326 *prem
= inplace_divrem1(z
->ob_digit
, a
->ob_digit
, size
, n
);
1327 return long_normalize(z
);
1330 /* Convert a long integer to a base 10 string. Returns a new non-shared
1331 string. (Return value is non-shared so that callers can modify the
1332 returned value if necessary.) */
1335 long_to_decimal_string(PyObject
*aa
, int addL
)
1337 PyLongObject
*scratch
, *a
;
1339 Py_ssize_t size
, strlen
, size_a
, i
, j
;
1340 digit
*pout
, *pin
, rem
, tenpow
;
1344 a
= (PyLongObject
*)aa
;
1345 if (a
== NULL
|| !PyLong_Check(a
)) {
1346 PyErr_BadInternalCall();
1349 size_a
= ABS(Py_SIZE(a
));
1350 negative
= Py_SIZE(a
) < 0;
1352 /* quick and dirty upper bound for the number of digits
1353 required to express a in base _PyLong_DECIMAL_BASE:
1355 #digits = 1 + floor(log2(a) / log2(_PyLong_DECIMAL_BASE))
1357 But log2(a) < size_a * PyLong_SHIFT, and
1358 log2(_PyLong_DECIMAL_BASE) = log2(10) * _PyLong_DECIMAL_SHIFT
1359 > 3 * _PyLong_DECIMAL_SHIFT
1361 if (size_a
> PY_SSIZE_T_MAX
/ PyLong_SHIFT
) {
1362 PyErr_SetString(PyExc_OverflowError
,
1363 "long is too large to format");
1366 /* the expression size_a * PyLong_SHIFT is now safe from overflow */
1367 size
= 1 + size_a
* PyLong_SHIFT
/ (3 * _PyLong_DECIMAL_SHIFT
);
1368 scratch
= _PyLong_New(size
);
1369 if (scratch
== NULL
)
1372 /* convert array of base _PyLong_BASE digits in pin to an array of
1373 base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP,
1374 Volume 2 (3rd edn), section 4.4, Method 1b). */
1376 pout
= scratch
->ob_digit
;
1378 for (i
= size_a
; --i
>= 0; ) {
1380 for (j
= 0; j
< size
; j
++) {
1381 twodigits z
= (twodigits
)pout
[j
] << PyLong_SHIFT
| hi
;
1382 hi
= (digit
)(z
/ _PyLong_DECIMAL_BASE
);
1383 pout
[j
] = (digit
)(z
- (twodigits
)hi
*
1384 _PyLong_DECIMAL_BASE
);
1387 pout
[size
++] = hi
% _PyLong_DECIMAL_BASE
;
1388 hi
/= _PyLong_DECIMAL_BASE
;
1390 /* check for keyboard interrupt */
1396 /* pout should have at least one digit, so that the case when a = 0
1401 /* calculate exact length of output string, and allocate */
1402 strlen
= (addL
!= 0) + negative
+
1403 1 + (size
- 1) * _PyLong_DECIMAL_SHIFT
;
1406 while (rem
>= tenpow
) {
1410 str
= PyString_FromStringAndSize(NULL
, strlen
);
1416 /* fill the string right-to-left */
1417 p
= PyString_AS_STRING(str
) + strlen
;
1421 /* pout[0] through pout[size-2] contribute exactly
1422 _PyLong_DECIMAL_SHIFT digits each */
1423 for (i
=0; i
< size
- 1; i
++) {
1425 for (j
= 0; j
< _PyLong_DECIMAL_SHIFT
; j
++) {
1426 *--p
= '0' + rem
% 10;
1430 /* pout[size-1]: always produce at least one decimal digit */
1433 *--p
= '0' + rem
% 10;
1441 /* check we've counted correctly */
1442 assert(p
== PyString_AS_STRING(str
));
1444 return (PyObject
*)str
;
1447 /* Convert the long to a string object with given base,
1448 appending a base prefix of 0[box] if base is 2, 8 or 16.
1449 Add a trailing "L" if addL is non-zero.
1450 If newstyle is zero, then use the pre-2.6 behavior of octal having
1451 a leading "0", instead of the prefix "0o" */
1452 PyAPI_FUNC(PyObject
*)
1453 _PyLong_Format(PyObject
*aa
, int base
, int addL
, int newstyle
)
1455 register PyLongObject
*a
= (PyLongObject
*)aa
;
1456 PyStringObject
*str
;
1464 return long_to_decimal_string((PyObject
*)a
, addL
);
1466 if (a
== NULL
|| !PyLong_Check(a
)) {
1467 PyErr_BadInternalCall();
1470 assert(base
>= 2 && base
<= 36);
1471 size_a
= ABS(Py_SIZE(a
));
1473 /* Compute a rough upper bound for the length of the string */
1480 i
= 5 + (addL
? 1 : 0);
1481 /* ensure we don't get signed overflow in sz calculation */
1482 if (size_a
> (PY_SSIZE_T_MAX
- i
) / PyLong_SHIFT
) {
1483 PyErr_SetString(PyExc_OverflowError
,
1484 "long is too large to format");
1487 sz
= i
+ 1 + (size_a
* PyLong_SHIFT
- 1) / bits
;
1489 str
= (PyStringObject
*) PyString_FromStringAndSize((char *)0, sz
);
1492 p
= PyString_AS_STRING(str
) + sz
;
1499 if (a
->ob_size
== 0) {
1502 else if ((base
& (base
- 1)) == 0) {
1503 /* JRH: special case for power-of-2 bases */
1504 twodigits accum
= 0;
1505 int accumbits
= 0; /* # of bits in accum */
1506 int basebits
= 1; /* # of bits in base-1 */
1508 while ((i
>>= 1) > 1)
1511 for (i
= 0; i
< size_a
; ++i
) {
1512 accum
|= (twodigits
)a
->ob_digit
[i
] << accumbits
;
1513 accumbits
+= PyLong_SHIFT
;
1514 assert(accumbits
>= basebits
);
1516 char cdigit
= (char)(accum
& (base
- 1));
1517 cdigit
+= (cdigit
< 10) ? '0' : 'a'-10;
1518 assert(p
> PyString_AS_STRING(str
));
1520 accumbits
-= basebits
;
1522 } while (i
< size_a
-1 ? accumbits
>= basebits
: accum
> 0);
1526 /* Not 0, and base not a power of 2. Divide repeatedly by
1527 base, but for speed use the highest power of base that
1529 Py_ssize_t size
= size_a
;
1530 digit
*pin
= a
->ob_digit
;
1531 PyLongObject
*scratch
;
1532 /* powbasw <- largest power of base that fits in a digit. */
1533 digit powbase
= base
; /* powbase == base ** power */
1536 twodigits newpow
= powbase
* (twodigits
)base
;
1537 if (newpow
>> PyLong_SHIFT
)
1538 /* doesn't fit in a digit */
1540 powbase
= (digit
)newpow
;
1544 /* Get a scratch area for repeated division. */
1545 scratch
= _PyLong_New(size
);
1546 if (scratch
== NULL
) {
1551 /* Repeatedly divide by powbase. */
1553 int ntostore
= power
;
1554 digit rem
= inplace_divrem1(scratch
->ob_digit
,
1555 pin
, size
, powbase
);
1556 pin
= scratch
->ob_digit
; /* no need to use a again */
1557 if (pin
[size
- 1] == 0)
1565 /* Break rem into digits. */
1566 assert(ntostore
> 0);
1568 digit nextrem
= (digit
)(rem
/ base
);
1569 char c
= (char)(rem
- nextrem
* base
);
1570 assert(p
> PyString_AS_STRING(str
));
1571 c
+= (c
< 10) ? '0' : 'a'-10;
1575 /* Termination is a bit delicate: must not
1576 store leading zeroes, so must get out if
1577 remaining quotient and rem are both 0. */
1578 } while (ntostore
&& (size
|| rem
));
1579 } while (size
!= 0);
1587 else if (base
== 8) {
1596 else if (base
== 16) {
1600 else if (base
!= 10) {
1602 *--p
= '0' + base
%10;
1604 *--p
= '0' + base
/10;
1608 if (p
!= PyString_AS_STRING(str
)) {
1609 char *q
= PyString_AS_STRING(str
);
1612 } while ((*q
++ = *p
++) != '\0');
1614 _PyString_Resize((PyObject
**)&str
,
1615 (Py_ssize_t
) (q
- PyString_AS_STRING(str
)));
1617 return (PyObject
*)str
;
/* Table of digit values for 8-bit string -> integer conversion.
 * '0' maps to 0, ..., '9' maps to 9.
 * 'a' and 'A' map to 10, ..., 'z' and 'Z' map to 35.
 * All other indices map to 37.
 * Note that when converting a base B string, a char c is a legitimate
 * base B digit iff _PyLong_DigitValue[Py_CHARMASK(c)] < B.
 */
int _PyLong_DigitValue[256] = {
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  37, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
};
1646 /* *str points to the first digit in a string of base `base` digits. base
1647 * is a power of 2 (2, 4, 8, 16, or 32). *str is set to point to the first
1648 * non-digit (which may be *str!). A normalized long is returned.
1649 * The point to this routine is that it takes time linear in the number of
1650 * string characters.
1652 static PyLongObject
*
1653 long_from_binary_base(char **str
, int base
)
1664 assert(base
>= 2 && base
<= 32 && (base
& (base
- 1)) == 0);
1666 for (bits_per_char
= -1; n
; ++bits_per_char
)
1668 /* n <- total # of bits needed, while setting p to end-of-string */
1669 while (_PyLong_DigitValue
[Py_CHARMASK(*p
)] < base
)
1672 /* n <- # of Python digits needed, = ceiling(n/PyLong_SHIFT). */
1673 n
= (p
- start
) * bits_per_char
+ PyLong_SHIFT
- 1;
1674 if (n
/ bits_per_char
< p
- start
) {
1675 PyErr_SetString(PyExc_ValueError
,
1676 "long string too large to convert");
1679 n
= n
/ PyLong_SHIFT
;
1683 /* Read string from right, and fill in long from left; i.e.,
1684 * from least to most significant in both.
1688 pdigit
= z
->ob_digit
;
1689 while (--p
>= start
) {
1690 int k
= _PyLong_DigitValue
[Py_CHARMASK(*p
)];
1691 assert(k
>= 0 && k
< base
);
1692 accum
|= (twodigits
)k
<< bits_in_accum
;
1693 bits_in_accum
+= bits_per_char
;
1694 if (bits_in_accum
>= PyLong_SHIFT
) {
1695 *pdigit
++ = (digit
)(accum
& PyLong_MASK
);
1696 assert(pdigit
- z
->ob_digit
<= n
);
1697 accum
>>= PyLong_SHIFT
;
1698 bits_in_accum
-= PyLong_SHIFT
;
1699 assert(bits_in_accum
< PyLong_SHIFT
);
1702 if (bits_in_accum
) {
1703 assert(bits_in_accum
<= PyLong_SHIFT
);
1704 *pdigit
++ = (digit
)accum
;
1705 assert(pdigit
- z
->ob_digit
<= n
);
1707 while (pdigit
- z
->ob_digit
< n
)
1709 return long_normalize(z
);
1713 PyLong_FromString(char *str
, char **pend
, int base
)
1716 char *start
, *orig_str
= str
;
1718 PyObject
*strobj
, *strrepr
;
1721 if ((base
!= 0 && base
< 2) || base
> 36) {
1722 PyErr_SetString(PyExc_ValueError
,
1723 "long() arg 2 must be >= 2 and <= 36");
1726 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1730 else if (*str
== '-') {
1734 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1737 /* No base given. Deduce the base from the contents
1741 else if (str
[1] == 'x' || str
[1] == 'X')
1743 else if (str
[1] == 'o' || str
[1] == 'O')
1745 else if (str
[1] == 'b' || str
[1] == 'B')
1748 /* "old" (C-style) octal literal, still valid in
1749 2.x, although illegal in 3.x */
1752 /* Whether or not we were deducing the base, skip leading chars
1754 if (str
[0] == '0' &&
1755 ((base
== 16 && (str
[1] == 'x' || str
[1] == 'X')) ||
1756 (base
== 8 && (str
[1] == 'o' || str
[1] == 'O')) ||
1757 (base
== 2 && (str
[1] == 'b' || str
[1] == 'B'))))
1761 if ((base
& (base
- 1)) == 0)
1762 z
= long_from_binary_base(&str
, base
);
1765 Binary bases can be converted in time linear in the number of digits, because
1766 Python's representation base is binary. Other bases (including decimal!) use
1767 the simple quadratic-time algorithm below, complicated by some speed tricks.
1769 First some math: the largest integer that can be expressed in N base-B digits
1770 is B**N-1. Consequently, if we have an N-digit input in base B, the worst-
1771 case number of Python digits needed to hold it is the smallest integer n s.t.
1773 PyLong_BASE**n-1 >= B**N-1 [or, adding 1 to both sides]
1774 PyLong_BASE**n >= B**N [taking logs to base PyLong_BASE]
1775 n >= log(B**N)/log(PyLong_BASE) = N * log(B)/log(PyLong_BASE)
1777 The static array log_base_PyLong_BASE[base] == log(base)/log(PyLong_BASE) so
1778 we can compute this quickly. A Python long with that much space is reserved
1779 near the start, and the result is computed into it.
1781 The input string is actually treated as being in base base**i (i.e., i digits
1782 are processed at a time), where two more static arrays hold:
1784 convwidth_base[base] = the largest integer i such that
1785 base**i <= PyLong_BASE
1786 convmultmax_base[base] = base ** convwidth_base[base]
1788 The first of these is the largest i such that i consecutive input digits
1789 must fit in a single Python digit. The second is effectively the input
1790 base we're really using.
1792 Viewing the input as a sequence <c0, c1, ..., c_n-1> of digits in base
1793 convmultmax_base[base], the result is "simply"
1795 (((c0*B + c1)*B + c2)*B + c3)*B + ... ))) + c_n-1
1797 where B = convmultmax_base[base].
1799 Error analysis: as above, the number of Python digits `n` needed is worst-
1802 n >= N * log(B)/log(PyLong_BASE)
1804 where `N` is the number of input digits in base `B`. This is computed via
1806 size_z = (Py_ssize_t)((scan - str) * log_base_PyLong_BASE[base]) + 1;
1808 below. Two numeric concerns are how much space this can waste, and whether
1809 the computed result can be too small. To be concrete, assume PyLong_BASE =
1810 2**15, which is the default (and it's unlikely anyone changes that).
1812 Waste isn't a problem: provided the first input digit isn't 0, the difference
1813 between the worst-case input with N digits and the smallest input with N
1814 digits is about a factor of B, but B is small compared to PyLong_BASE so at
1815 most one allocated Python digit can remain unused on that count. If
1816 N*log(B)/log(PyLong_BASE) is mathematically an exact integer, then truncating
1817 that and adding 1 returns a result 1 larger than necessary. However, that
1818 can't happen: whenever B is a power of 2, long_from_binary_base() is called
1819 instead, and it's impossible for B**i to be an integer power of 2**15 when B
1820 is not a power of 2 (i.e., it's impossible for N*log(B)/log(PyLong_BASE) to be
1821 an exact integer when B is not a power of 2, since B**i has a prime factor
1822 other than 2 in that case, but (2**15)**j's only prime factor is 2).
1824 The computed result can be too small if the true value of
1825 N*log(B)/log(PyLong_BASE) is a little bit larger than an exact integer, but
1826 due to roundoff errors (in computing log(B), log(PyLong_BASE), their quotient,
1827 and/or multiplying that by N) yields a numeric result a little less than that
1828 integer. Unfortunately, "how close can a transcendental function get to an
1829 integer over some range?" questions are generally theoretically intractable.
1830 Computer analysis via continued fractions is practical: expand
1831 log(B)/log(PyLong_BASE) via continued fractions, giving a sequence i/j of "the
1832 best" rational approximations. Then j*log(B)/log(PyLong_BASE) is
1833 approximately equal to (the integer) i. This shows that we can get very close
1834 to being in trouble, but very rarely. For example, 76573 is a denominator in
1835 one of the continued-fraction approximations to log(10)/log(2**15), and
1838 >>> log(10)/log(2**15)*76573
1841 is very close to an integer. If we were working with IEEE single-precision,
1842 rounding errors could kill us. Finding worst cases in IEEE double-precision
1843 requires better-than-double-precision log() functions, and Tim didn't bother.
1844 Instead the code checks to see whether the allocated space is enough as each
1845 new Python digit is added, and copies the whole thing to a larger long if not.
1846 This should happen extremely rarely, and in fact I don't have a test case
1847 that triggers it(!). Instead the code was tested by artificially allocating
1848 just 1 digit at the start, so that the copying code was exercised for every
1849 digit beyond the first.
1851 register twodigits c
; /* current input character */
1855 twodigits convmultmax
, convmult
;
1859 static double log_base_PyLong_BASE
[37] = {0.0e0
,};
1860 static int convwidth_base
[37] = {0,};
1861 static twodigits convmultmax_base
[37] = {0,};
1863 if (log_base_PyLong_BASE
[base
] == 0.0) {
1864 twodigits convmax
= base
;
1867 log_base_PyLong_BASE
[base
] = (log((double)base
) /
1868 log((double)PyLong_BASE
));
1870 twodigits next
= convmax
* base
;
1871 if (next
> PyLong_BASE
)
1876 convmultmax_base
[base
] = convmax
;
1878 convwidth_base
[base
] = i
;
1881 /* Find length of the string of numeric characters. */
1883 while (_PyLong_DigitValue
[Py_CHARMASK(*scan
)] < base
)
1886 /* Create a long object that can contain the largest possible
1887 * integer with this base and length. Note that there's no
1888 * need to initialize z->ob_digit -- no slot is read up before
1889 * being stored into.
1891 size_z
= (Py_ssize_t
)((scan
- str
) * log_base_PyLong_BASE
[base
]) + 1;
1892 /* Uncomment next line to test exceedingly rare copy code */
1895 z
= _PyLong_New(size_z
);
1900 /* `convwidth` consecutive input digits are treated as a single
1901 * digit in base `convmultmax`.
1903 convwidth
= convwidth_base
[base
];
1904 convmultmax
= convmultmax_base
[base
];
1907 while (str
< scan
) {
1908 /* grab up to convwidth digits from the input string */
1909 c
= (digit
)_PyLong_DigitValue
[Py_CHARMASK(*str
++)];
1910 for (i
= 1; i
< convwidth
&& str
!= scan
; ++i
, ++str
) {
1911 c
= (twodigits
)(c
* base
+
1912 _PyLong_DigitValue
[Py_CHARMASK(*str
)]);
1913 assert(c
< PyLong_BASE
);
1916 convmult
= convmultmax
;
1917 /* Calculate the shift only if we couldn't get
1920 if (i
!= convwidth
) {
1926 /* Multiply z by convmult, and add c. */
1928 pzstop
= pz
+ Py_SIZE(z
);
1929 for (; pz
< pzstop
; ++pz
) {
1930 c
+= (twodigits
)*pz
* convmult
;
1931 *pz
= (digit
)(c
& PyLong_MASK
);
1934 /* carry off the current end? */
1936 assert(c
< PyLong_BASE
);
1937 if (Py_SIZE(z
) < size_z
) {
1943 /* Extremely rare. Get more space. */
1944 assert(Py_SIZE(z
) == size_z
);
1945 tmp
= _PyLong_New(size_z
+ 1);
1950 memcpy(tmp
->ob_digit
,
1952 sizeof(digit
) * size_z
);
1955 z
->ob_digit
[size_z
] = (digit
)c
;
1966 Py_SIZE(z
) = -(Py_SIZE(z
));
1967 if (*str
== 'L' || *str
== 'l')
1969 while (*str
&& isspace(Py_CHARMASK(*str
)))
1975 return (PyObject
*) z
;
1979 slen
= strlen(orig_str
) < 200 ? strlen(orig_str
) : 200;
1980 strobj
= PyString_FromStringAndSize(orig_str
, slen
);
1983 strrepr
= PyObject_Repr(strobj
);
1985 if (strrepr
== NULL
)
1987 PyErr_Format(PyExc_ValueError
,
1988 "invalid literal for long() with base %d: %s",
1989 base
, PyString_AS_STRING(strrepr
));
1994 #ifdef Py_USING_UNICODE
1996 PyLong_FromUnicode(Py_UNICODE
*u
, Py_ssize_t length
, int base
)
1999 char *buffer
= (char *)PyMem_MALLOC(length
+1);
2004 if (PyUnicode_EncodeDecimal(u
, length
, buffer
, NULL
)) {
2008 result
= PyLong_FromString(buffer
, NULL
, base
);
2015 static PyLongObject
*x_divrem
2016 (PyLongObject
*, PyLongObject
*, PyLongObject
**);
2017 static PyObject
*long_long(PyObject
*v
);
2019 /* Long division with remainder, top-level routine */
2022 long_divrem(PyLongObject
*a
, PyLongObject
*b
,
2023 PyLongObject
**pdiv
, PyLongObject
**prem
)
2025 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2029 PyErr_SetString(PyExc_ZeroDivisionError
,
2030 "long division or modulo by zero");
2033 if (size_a
< size_b
||
2034 (size_a
== size_b
&&
2035 a
->ob_digit
[size_a
-1] < b
->ob_digit
[size_b
-1])) {
2037 *pdiv
= _PyLong_New(0);
2041 *prem
= (PyLongObject
*) a
;
2046 z
= divrem1(a
, b
->ob_digit
[0], &rem
);
2049 *prem
= (PyLongObject
*) PyLong_FromLong((long)rem
);
2050 if (*prem
== NULL
) {
2056 z
= x_divrem(a
, b
, prem
);
2061 The quotient z has the sign of a*b;
2062 the remainder r has the sign of a,
2064 if ((a
->ob_size
< 0) != (b
->ob_size
< 0))
2065 z
->ob_size
= -(z
->ob_size
);
2066 if (a
->ob_size
< 0 && (*prem
)->ob_size
!= 0)
2067 (*prem
)->ob_size
= -((*prem
)->ob_size
);
2072 /* Unsigned long division with remainder -- the algorithm. The arguments v1
2073 and w1 should satisfy 2 <= ABS(Py_SIZE(w1)) <= ABS(Py_SIZE(v1)). */
2075 static PyLongObject
*
2076 x_divrem(PyLongObject
*v1
, PyLongObject
*w1
, PyLongObject
**prem
)
2078 PyLongObject
*v
, *w
, *a
;
2079 Py_ssize_t i
, k
, size_v
, size_w
;
2081 digit wm1
, wm2
, carry
, q
, r
, vtop
, *v0
, *vk
, *w0
, *ak
;
2086 /* We follow Knuth [The Art of Computer Programming, Vol. 2 (3rd
2087 edn.), section 4.3.1, Algorithm D], except that we don't explicitly
2088 handle the special case when the initial estimate q for a quotient
2089 digit is >= PyLong_BASE: the max value for q is PyLong_BASE+1, and
2090 that won't overflow a digit. */
2092 /* allocate space; w will also be used to hold the final remainder */
2093 size_v
= ABS(Py_SIZE(v1
));
2094 size_w
= ABS(Py_SIZE(w1
));
2095 assert(size_v
>= size_w
&& size_w
>= 2); /* Assert checks by div() */
2096 v
= _PyLong_New(size_v
+1);
2101 w
= _PyLong_New(size_w
);
2108 /* normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
2109 shift v1 left by the same amount. Results go into w and v. */
2110 d
= PyLong_SHIFT
- bits_in_digit(w1
->ob_digit
[size_w
-1]);
2111 carry
= v_lshift(w
->ob_digit
, w1
->ob_digit
, size_w
, d
);
2113 carry
= v_lshift(v
->ob_digit
, v1
->ob_digit
, size_v
, d
);
2114 if (carry
!= 0 || v
->ob_digit
[size_v
-1] >= w
->ob_digit
[size_w
-1]) {
2115 v
->ob_digit
[size_v
] = carry
;
2119 /* Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
2120 at most (and usually exactly) k = size_v - size_w digits. */
2121 k
= size_v
- size_w
;
2134 for (vk
= v0
+k
, ak
= a
->ob_digit
+ k
; vk
-- > v0
;) {
2135 /* inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
2136 single-digit quotient q, remainder in vk[0:size_w]. */
2146 /* estimate quotient digit q; may overestimate by 1 (rare) */
2148 assert(vtop
<= wm1
);
2149 vv
= ((twodigits
)vtop
<< PyLong_SHIFT
) | vk
[size_w
-1];
2150 q
= (digit
)(vv
/ wm1
);
2151 r
= (digit
)(vv
- (twodigits
)wm1
* q
); /* r = vv % wm1 */
2152 while ((twodigits
)wm2
* q
> (((twodigits
)r
<< PyLong_SHIFT
)
2156 if (r
>= PyLong_BASE
)
2159 assert(q
<= PyLong_BASE
);
2161 /* subtract q*w0[0:size_w] from vk[0:size_w+1] */
2163 for (i
= 0; i
< size_w
; ++i
) {
2164 /* invariants: -PyLong_BASE <= -q <= zhi <= 0;
2165 -PyLong_BASE * q <= z < PyLong_BASE */
2166 z
= (sdigit
)vk
[i
] + zhi
-
2167 (stwodigits
)q
* (stwodigits
)w0
[i
];
2168 vk
[i
] = (digit
)z
& PyLong_MASK
;
2169 zhi
= (sdigit
)Py_ARITHMETIC_RIGHT_SHIFT(stwodigits
,
2173 /* add w back if q was too large (this branch taken rarely) */
2174 assert((sdigit
)vtop
+ zhi
== -1 || (sdigit
)vtop
+ zhi
== 0);
2175 if ((sdigit
)vtop
+ zhi
< 0) {
2177 for (i
= 0; i
< size_w
; ++i
) {
2178 carry
+= vk
[i
] + w0
[i
];
2179 vk
[i
] = carry
& PyLong_MASK
;
2180 carry
>>= PyLong_SHIFT
;
2185 /* store quotient digit */
2186 assert(q
< PyLong_BASE
);
2190 /* unshift remainder; we reuse w to store the result */
2191 carry
= v_rshift(w0
, v0
, size_w
, d
);
2195 *prem
= long_normalize(w
);
2196 return long_normalize(a
);
2199 /* For a nonzero PyLong a, express a in the form x * 2**e, with 0.5 <=
2200 abs(x) < 1.0 and e >= 0; return x and put e in *e. Here x is
2201 rounded to DBL_MANT_DIG significant bits using round-half-to-even.
2202 If a == 0, return 0.0 and set *e = 0. If the resulting exponent
2203 e is larger than PY_SSIZE_T_MAX, raise OverflowError and return
2206 /* attempt to define 2.0**DBL_MANT_DIG as a compile-time constant */
2207 #if DBL_MANT_DIG == 53
2208 #define EXP2_DBL_MANT_DIG 9007199254740992.0
2210 #define EXP2_DBL_MANT_DIG (ldexp(1.0, DBL_MANT_DIG))
2214 _PyLong_Frexp(PyLongObject
*a
, Py_ssize_t
*e
)
2216 Py_ssize_t a_size
, a_bits
, shift_digits
, shift_bits
, x_size
;
2217 /* See below for why x_digits is always large enough. */
2218 digit rem
, x_digits
[2 + (DBL_MANT_DIG
+ 1) / PyLong_SHIFT
];
2220 /* Correction term for round-half-to-even rounding. For a digit x,
2221 "x + half_even_correction[x & 7]" gives x rounded to the nearest
2222 multiple of 4, rounding ties to a multiple of 8. */
2223 static const int half_even_correction
[8] = {0, -1, -2, 1, 0, -1, 2, 1};
2225 a_size
= ABS(Py_SIZE(a
));
2227 /* Special case for 0: significand 0.0, exponent 0. */
2231 a_bits
= bits_in_digit(a
->ob_digit
[a_size
-1]);
2232 /* The following is an overflow-free version of the check
2233 "if ((a_size - 1) * PyLong_SHIFT + a_bits > PY_SSIZE_T_MAX) ..." */
2234 if (a_size
>= (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 &&
2235 (a_size
> (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 ||
2236 a_bits
> (PY_SSIZE_T_MAX
- 1) % PyLong_SHIFT
+ 1))
2238 a_bits
= (a_size
- 1) * PyLong_SHIFT
+ a_bits
;
2240 /* Shift the first DBL_MANT_DIG + 2 bits of a into x_digits[0:x_size]
2241 (shifting left if a_bits <= DBL_MANT_DIG + 2).
2243 Number of digits needed for result: write // for floor division.
2244 Then if shifting left, we end up using
2246 1 + a_size + (DBL_MANT_DIG + 2 - a_bits) // PyLong_SHIFT
2248 digits. If shifting right, we use
2250 a_size - (a_bits - DBL_MANT_DIG - 2) // PyLong_SHIFT
2252 digits. Using a_size = 1 + (a_bits - 1) // PyLong_SHIFT along with
2255 m // PyLong_SHIFT + n // PyLong_SHIFT <= (m + n) // PyLong_SHIFT
2256 m // PyLong_SHIFT - n // PyLong_SHIFT <=
2257 1 + (m - n - 1) // PyLong_SHIFT,
2259 valid for any integers m and n, we find that x_size satisfies
2261 x_size <= 2 + (DBL_MANT_DIG + 1) // PyLong_SHIFT
2265 if (a_bits
<= DBL_MANT_DIG
+ 2) {
2266 shift_digits
= (DBL_MANT_DIG
+ 2 - a_bits
) / PyLong_SHIFT
;
2267 shift_bits
= (DBL_MANT_DIG
+ 2 - a_bits
) % PyLong_SHIFT
;
2269 while (x_size
< shift_digits
)
2270 x_digits
[x_size
++] = 0;
2271 rem
= v_lshift(x_digits
+ x_size
, a
->ob_digit
, a_size
,
2274 x_digits
[x_size
++] = rem
;
2277 shift_digits
= (a_bits
- DBL_MANT_DIG
- 2) / PyLong_SHIFT
;
2278 shift_bits
= (a_bits
- DBL_MANT_DIG
- 2) % PyLong_SHIFT
;
2279 rem
= v_rshift(x_digits
, a
->ob_digit
+ shift_digits
,
2280 a_size
- shift_digits
, (int)shift_bits
);
2281 x_size
= a_size
- shift_digits
;
2282 /* For correct rounding below, we need the least significant
2283 bit of x to be 'sticky' for this shift: if any of the bits
2284 shifted out was nonzero, we set the least significant bit
2289 while (shift_digits
> 0)
2290 if (a
->ob_digit
[--shift_digits
]) {
2295 assert(1 <= x_size
&&
2296 x_size
<= (Py_ssize_t
)(sizeof(x_digits
)/sizeof(digit
)));
2298 /* Round, and convert to double. */
2299 x_digits
[0] += half_even_correction
[x_digits
[0] & 7];
2300 dx
= x_digits
[--x_size
];
2302 dx
= dx
* PyLong_BASE
+ x_digits
[--x_size
];
2304 /* Rescale; make correction if result is 1.0. */
2305 dx
/= 4.0 * EXP2_DBL_MANT_DIG
;
2307 if (a_bits
== PY_SSIZE_T_MAX
)
2314 return Py_SIZE(a
) < 0 ? -dx
: dx
;
2317 /* exponent > PY_SSIZE_T_MAX */
2318 PyErr_SetString(PyExc_OverflowError
,
2319 "huge integer: number of bits overflows a Py_ssize_t");
2324 /* Get a C double from a long int object. Rounds to the nearest double,
2325 using the round-half-to-even rule in the case of a tie. */
2328 PyLong_AsDouble(PyObject
*v
)
2330 Py_ssize_t exponent
;
2333 if (v
== NULL
|| !PyLong_Check(v
)) {
2334 PyErr_BadInternalCall();
2337 x
= _PyLong_Frexp((PyLongObject
*)v
, &exponent
);
2338 if ((x
== -1.0 && PyErr_Occurred()) || exponent
> DBL_MAX_EXP
) {
2339 PyErr_SetString(PyExc_OverflowError
,
2340 "long int too large to convert to float");
2343 return ldexp(x
, (int)exponent
);
2349 long_dealloc(PyObject
*v
)
2351 Py_TYPE(v
)->tp_free(v
);
2355 long_repr(PyObject
*v
)
2357 return _PyLong_Format(v
, 10, 1, 0);
2361 long_str(PyObject
*v
)
2363 return _PyLong_Format(v
, 10, 0, 0);
2367 long_compare(PyLongObject
*a
, PyLongObject
*b
)
2371 if (Py_SIZE(a
) != Py_SIZE(b
)) {
2372 sign
= Py_SIZE(a
) - Py_SIZE(b
);
2375 Py_ssize_t i
= ABS(Py_SIZE(a
));
2376 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2381 sign
= (sdigit
)a
->ob_digit
[i
] - (sdigit
)b
->ob_digit
[i
];
2386 return sign
< 0 ? -1 : sign
> 0 ? 1 : 0;
2390 long_hash(PyLongObject
*v
)
2396 /* This is designed so that Python ints and longs with the
2397 same value hash to the same value, otherwise comparisons
2398 of mapping keys will turn out weird */
2406 /* The following loop produces a C unsigned long x such that x is
2407 congruent to the absolute value of v modulo ULONG_MAX. The
2408 resulting x is nonzero if and only if v is. */
2410 /* Force a native long #-bits (32 or 64) circular shift */
2411 x
= (x
>> (8*SIZEOF_LONG
-PyLong_SHIFT
)) | (x
<< PyLong_SHIFT
);
2412 x
+= v
->ob_digit
[i
];
2413 /* If the addition above overflowed we compensate by
2414 incrementing. This preserves the value modulo
2416 if (x
< v
->ob_digit
[i
])
2420 if (x
== (unsigned long)-1)
2421 x
= (unsigned long)-2;
2426 /* Add the absolute values of two long integers. */
2428 static PyLongObject
*
2429 x_add(PyLongObject
*a
, PyLongObject
*b
)
2431 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2436 /* Ensure a is the larger of the two: */
2437 if (size_a
< size_b
) {
2438 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2439 { Py_ssize_t size_temp
= size_a
;
2441 size_b
= size_temp
; }
2443 z
= _PyLong_New(size_a
+1);
2446 for (i
= 0; i
< size_b
; ++i
) {
2447 carry
+= a
->ob_digit
[i
] + b
->ob_digit
[i
];
2448 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2449 carry
>>= PyLong_SHIFT
;
2451 for (; i
< size_a
; ++i
) {
2452 carry
+= a
->ob_digit
[i
];
2453 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2454 carry
>>= PyLong_SHIFT
;
2456 z
->ob_digit
[i
] = carry
;
2457 return long_normalize(z
);
2460 /* Subtract the absolute values of two integers. */
2462 static PyLongObject
*
2463 x_sub(PyLongObject
*a
, PyLongObject
*b
)
2465 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2471 /* Ensure a is the larger of the two: */
2472 if (size_a
< size_b
) {
2474 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2475 { Py_ssize_t size_temp
= size_a
;
2477 size_b
= size_temp
; }
2479 else if (size_a
== size_b
) {
2480 /* Find highest digit where a and b differ: */
2482 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2485 return _PyLong_New(0);
2486 if (a
->ob_digit
[i
] < b
->ob_digit
[i
]) {
2488 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2490 size_a
= size_b
= i
+1;
2492 z
= _PyLong_New(size_a
);
2495 for (i
= 0; i
< size_b
; ++i
) {
2496 /* The following assumes unsigned arithmetic
2497 works modulo 2**N for some N>PyLong_SHIFT. */
2498 borrow
= a
->ob_digit
[i
] - b
->ob_digit
[i
] - borrow
;
2499 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2500 borrow
>>= PyLong_SHIFT
;
2501 borrow
&= 1; /* Keep only one sign bit */
2503 for (; i
< size_a
; ++i
) {
2504 borrow
= a
->ob_digit
[i
] - borrow
;
2505 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2506 borrow
>>= PyLong_SHIFT
;
2507 borrow
&= 1; /* Keep only one sign bit */
2509 assert(borrow
== 0);
2511 z
->ob_size
= -(z
->ob_size
);
2512 return long_normalize(z
);
2516 long_add(PyLongObject
*v
, PyLongObject
*w
)
2518 PyLongObject
*a
, *b
, *z
;
2520 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2522 if (a
->ob_size
< 0) {
2523 if (b
->ob_size
< 0) {
2525 if (z
!= NULL
&& z
->ob_size
!= 0)
2526 z
->ob_size
= -(z
->ob_size
);
2539 return (PyObject
*)z
;
2543 long_sub(PyLongObject
*v
, PyLongObject
*w
)
2545 PyLongObject
*a
, *b
, *z
;
2547 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2549 if (a
->ob_size
< 0) {
2554 if (z
!= NULL
&& z
->ob_size
!= 0)
2555 z
->ob_size
= -(z
->ob_size
);
2565 return (PyObject
*)z
;
2568 /* Grade school multiplication, ignoring the signs.
2569 * Returns the absolute value of the product, or NULL if error.
2571 static PyLongObject
*
2572 x_mul(PyLongObject
*a
, PyLongObject
*b
)
2575 Py_ssize_t size_a
= ABS(Py_SIZE(a
));
2576 Py_ssize_t size_b
= ABS(Py_SIZE(b
));
2579 z
= _PyLong_New(size_a
+ size_b
);
2583 memset(z
->ob_digit
, 0, Py_SIZE(z
) * sizeof(digit
));
2585 /* Efficient squaring per HAC, Algorithm 14.16:
2586 * http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
2587 * Gives slightly less than a 2x speedup when a == b,
2588 * via exploiting that each entry in the multiplication
2589 * pyramid appears twice (except for the size_a squares).
2591 for (i
= 0; i
< size_a
; ++i
) {
2593 twodigits f
= a
->ob_digit
[i
];
2594 digit
*pz
= z
->ob_digit
+ (i
<< 1);
2595 digit
*pa
= a
->ob_digit
+ i
+ 1;
2596 digit
*paend
= a
->ob_digit
+ size_a
;
2603 carry
= *pz
+ f
* f
;
2604 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2605 carry
>>= PyLong_SHIFT
;
2606 assert(carry
<= PyLong_MASK
);
2608 /* Now f is added in twice in each column of the
2609 * pyramid it appears. Same as adding f<<1 once.
2612 while (pa
< paend
) {
2613 carry
+= *pz
+ *pa
++ * f
;
2614 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2615 carry
>>= PyLong_SHIFT
;
2616 assert(carry
<= (PyLong_MASK
<< 1));
2620 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2621 carry
>>= PyLong_SHIFT
;
2624 *pz
+= (digit
)(carry
& PyLong_MASK
);
2625 assert((carry
>> PyLong_SHIFT
) == 0);
2628 else { /* a is not the same as b -- gradeschool long mult */
2629 for (i
= 0; i
< size_a
; ++i
) {
2630 twodigits carry
= 0;
2631 twodigits f
= a
->ob_digit
[i
];
2632 digit
*pz
= z
->ob_digit
+ i
;
2633 digit
*pb
= b
->ob_digit
;
2634 digit
*pbend
= b
->ob_digit
+ size_b
;
2641 while (pb
< pbend
) {
2642 carry
+= *pz
+ *pb
++ * f
;
2643 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2644 carry
>>= PyLong_SHIFT
;
2645 assert(carry
<= PyLong_MASK
);
2648 *pz
+= (digit
)(carry
& PyLong_MASK
);
2649 assert((carry
>> PyLong_SHIFT
) == 0);
2652 return long_normalize(z
);
2655 /* A helper for Karatsuba multiplication (k_mul).
2656 Takes a long "n" and an integer "size" representing the place to
2657 split, and sets low and high such that abs(n) == (high << size) + low,
2658 viewing the shift as being by digits. The sign bit is ignored, and
2659 the return values are >= 0.
2660 Returns 0 on success, -1 on failure.
2663 kmul_split(PyLongObject
*n
,
2665 PyLongObject
**high
,
2668 PyLongObject
*hi
, *lo
;
2669 Py_ssize_t size_lo
, size_hi
;
2670 const Py_ssize_t size_n
= ABS(Py_SIZE(n
));
2672 size_lo
= MIN(size_n
, size
);
2673 size_hi
= size_n
- size_lo
;
2675 if ((hi
= _PyLong_New(size_hi
)) == NULL
)
2677 if ((lo
= _PyLong_New(size_lo
)) == NULL
) {
2682 memcpy(lo
->ob_digit
, n
->ob_digit
, size_lo
* sizeof(digit
));
2683 memcpy(hi
->ob_digit
, n
->ob_digit
+ size_lo
, size_hi
* sizeof(digit
));
2685 *high
= long_normalize(hi
);
2686 *low
= long_normalize(lo
);
2690 static PyLongObject
*k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
);
2692 /* Karatsuba multiplication. Ignores the input signs, and returns the
2693 * absolute value of the product (or NULL if error).
2694 * See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
2696 static PyLongObject
*
2697 k_mul(PyLongObject
*a
, PyLongObject
*b
)
2699 Py_ssize_t asize
= ABS(Py_SIZE(a
));
2700 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2701 PyLongObject
*ah
= NULL
;
2702 PyLongObject
*al
= NULL
;
2703 PyLongObject
*bh
= NULL
;
2704 PyLongObject
*bl
= NULL
;
2705 PyLongObject
*ret
= NULL
;
2706 PyLongObject
*t1
, *t2
, *t3
;
2707 Py_ssize_t shift
; /* the number of digits we split off */
2710 /* (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
2711 * Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
2712 * Then the original product is
2713 * ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
2714 * By picking X to be a power of 2, "*X" is just shifting, and it's
2715 * been reduced to 3 multiplies on numbers half the size.
2718 /* We want to split based on the larger number; fiddle so that b
2721 if (asize
> bsize
) {
2731 /* Use gradeschool math when either number is too small. */
2732 i
= a
== b
? KARATSUBA_SQUARE_CUTOFF
: KARATSUBA_CUTOFF
;
2735 return _PyLong_New(0);
2740 /* If a is small compared to b, splitting on b gives a degenerate
2741 * case with ah==0, and Karatsuba may be (even much) less efficient
2742 * than "grade school" then. However, we can still win, by viewing
2743 * b as a string of "big digits", each of width a->ob_size. That
2744 * leads to a sequence of balanced calls to k_mul.
2746 if (2 * asize
<= bsize
)
2747 return k_lopsided_mul(a
, b
);
2749 /* Split a & b into hi & lo pieces. */
2751 if (kmul_split(a
, shift
, &ah
, &al
) < 0) goto fail
;
2752 assert(Py_SIZE(ah
) > 0); /* the split isn't degenerate */
2760 else if (kmul_split(b
, shift
, &bh
, &bl
) < 0) goto fail
;
2763 * 1. Allocate result space (asize + bsize digits: that's always
2765 * 2. Compute ah*bh, and copy into result at 2*shift.
2766 * 3. Compute al*bl, and copy into result at 0. Note that this
2767 * can't overlap with #2.
2768 * 4. Subtract al*bl from the result, starting at shift. This may
2769 * underflow (borrow out of the high digit), but we don't care:
2770 * we're effectively doing unsigned arithmetic mod
2771 * PyLong_BASE**(sizea + sizeb), and so long as the *final* result fits,
2772 * borrows and carries out of the high digit can be ignored.
2773 * 5. Subtract ah*bh from the result, starting at shift.
2774 * 6. Compute (ah+al)*(bh+bl), and add it into the result starting
2778 /* 1. Allocate result space. */
2779 ret
= _PyLong_New(asize
+ bsize
);
2780 if (ret
== NULL
) goto fail
;
2782 /* Fill with trash, to catch reference to uninitialized digits. */
2783 memset(ret
->ob_digit
, 0xDF, Py_SIZE(ret
) * sizeof(digit
));
2786 /* 2. t1 <- ah*bh, and copy into high digits of result. */
2787 if ((t1
= k_mul(ah
, bh
)) == NULL
) goto fail
;
2788 assert(Py_SIZE(t1
) >= 0);
2789 assert(2*shift
+ Py_SIZE(t1
) <= Py_SIZE(ret
));
2790 memcpy(ret
->ob_digit
+ 2*shift
, t1
->ob_digit
,
2791 Py_SIZE(t1
) * sizeof(digit
));
2793 /* Zero-out the digits higher than the ah*bh copy. */
2794 i
= Py_SIZE(ret
) - 2*shift
- Py_SIZE(t1
);
2796 memset(ret
->ob_digit
+ 2*shift
+ Py_SIZE(t1
), 0,
2799 /* 3. t2 <- al*bl, and copy into the low digits. */
2800 if ((t2
= k_mul(al
, bl
)) == NULL
) {
2804 assert(Py_SIZE(t2
) >= 0);
2805 assert(Py_SIZE(t2
) <= 2*shift
); /* no overlap with high digits */
2806 memcpy(ret
->ob_digit
, t2
->ob_digit
, Py_SIZE(t2
) * sizeof(digit
));
2808 /* Zero out remaining digits. */
2809 i
= 2*shift
- Py_SIZE(t2
); /* number of uninitialized digits */
2811 memset(ret
->ob_digit
+ Py_SIZE(t2
), 0, i
* sizeof(digit
));
2813 /* 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
2814 * because it's fresher in cache.
2816 i
= Py_SIZE(ret
) - shift
; /* # digits after shift */
2817 (void)v_isub(ret
->ob_digit
+ shift
, i
, t2
->ob_digit
, Py_SIZE(t2
));
2820 (void)v_isub(ret
->ob_digit
+ shift
, i
, t1
->ob_digit
, Py_SIZE(t1
));
2823 /* 6. t3 <- (ah+al)(bh+bl), and add into result. */
2824 if ((t1
= x_add(ah
, al
)) == NULL
) goto fail
;
2833 else if ((t2
= x_add(bh
, bl
)) == NULL
) {
2844 if (t3
== NULL
) goto fail
;
2845 assert(Py_SIZE(t3
) >= 0);
2847 /* Add t3. It's not obvious why we can't run out of room here.
2848 * See the (*) comment after this function.
2850 (void)v_iadd(ret
->ob_digit
+ shift
, i
, t3
->ob_digit
, Py_SIZE(t3
));
2853 return long_normalize(ret
);
2864 /* (*) Why adding t3 can't "run out of room" above.
2866 Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
2869 1. For any integer i, i = c(i/2) + f(i/2). In particular,
2870 bsize = c(bsize/2) + f(bsize/2).
2871 2. shift = f(bsize/2)
2873 4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
2874 routine, so asize > bsize/2 >= f(bsize/2) in this routine.
2876 We allocated asize + bsize result digits, and add t3 into them at an offset
2877 of shift. This leaves asize+bsize-shift allocated digit positions for t3
2878 to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
2879 asize + c(bsize/2) available digit positions.
2881 bh has c(bsize/2) digits, and bl at most f(bsize/2) digits. So bh+bl has
2882 at most c(bsize/2) digits + 1 bit.
2884 If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
2885 digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
2886 most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
2888 The product (ah+al)*(bh+bl) therefore has at most
2890 c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
2892 and we have asize + c(bsize/2) available digit positions. We need to show
2893 this is always enough. An instance of c(bsize/2) cancels out in both, so
2894 the question reduces to whether asize digits is enough to hold
2895 (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
2896 then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
2897 asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
2898 digit is enough to hold 2 bits. This is so since PyLong_SHIFT=15 >= 2. If
2899 asize == bsize, then we're asking whether bsize digits is enough to hold
2900 c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
2901 is enough to hold 2 bits. This is so if bsize >= 2, which holds because
2902 bsize >= KARATSUBA_CUTOFF >= 2.
2904 Note that since there's always enough room for (ah+al)*(bh+bl), and that's
2905 clearly >= each of ah*bh and al*bl, there's always enough room to subtract
2906 ah*bh and al*bl too.
2909 /* b has at least twice the digits of a, and a is big enough that Karatsuba
2910 * would pay off *if* the inputs had balanced sizes. View b as a sequence
2911 * of slices, each with a->ob_size digits, and multiply the slices by a,
2912 * one at a time. This gives k_mul balanced inputs to work with, and is
2913 * also cache-friendly (we compute one double-width slice of the result
2914 * at a time, then move on, never backtracking except for the helpful
2915 * single-width slice overlap between successive partial sums).
/* Returns a new reference on success. NOTE(review): the error paths and
   the loop header driving the slice iteration are not visible in this
   fragment -- confirm against the full source. */
2917 static PyLongObject
*
2918 k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
)
2920 const Py_ssize_t asize
= ABS(Py_SIZE(a
));
2921 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2922 Py_ssize_t nbdone
; /* # of b digits already multiplied */
2924 PyLongObject
*bslice
= NULL
;
/* Preconditions established by the caller (k_mul). */
2926 assert(asize
> KARATSUBA_CUTOFF
);
2927 assert(2 * asize
<= bsize
);
2929 /* Allocate result space, and zero it out. */
2930 ret
= _PyLong_New(asize
+ bsize
);
2933 memset(ret
->ob_digit
, 0, Py_SIZE(ret
) * sizeof(digit
));
2935 /* Successive slices of b are copied into bslice. */
2936 bslice
= _PyLong_New(asize
);
2942 PyLongObject
*product
;
/* Each pass consumes at most asize digits of b, giving k_mul
   balanced operands. */
2943 const Py_ssize_t nbtouse
= MIN(bsize
, asize
);
2945 /* Multiply the next slice of b by a. */
2946 memcpy(bslice
->ob_digit
, b
->ob_digit
+ nbdone
,
2947 nbtouse
* sizeof(digit
));
2948 Py_SIZE(bslice
) = nbtouse
;
2949 product
= k_mul(a
, bslice
);
2950 if (product
== NULL
)
2953 /* Add into result. */
2954 (void)v_iadd(ret
->ob_digit
+ nbdone
, Py_SIZE(ret
) - nbdone
,
2955 product
->ob_digit
, Py_SIZE(product
));
/* Strip any leading zero digits before handing the result back. */
2963 return long_normalize(ret
);
/* nb_multiply slot: coerce both operands to long and multiply.
   NOTE(review): the line that computes z (the |a|*|b| product via k_mul)
   is not visible in this fragment -- confirm against the full source. */
2972 long_mul(PyLongObject
*v
, PyLongObject
*w
)
2974 PyLongObject
*a
, *b
, *z
;
/* If either operand cannot be coerced, defer to the other type. */
2976 if (!convert_binop((PyObject
*)v
, (PyObject
*)w
, &a
, &b
)) {
2977 Py_INCREF(Py_NotImplemented
);
2978 return Py_NotImplemented
;
2982 /* Negate if exactly one of the inputs is negative. */
2983 if (((a
->ob_size
^ b
->ob_size
) < 0) && z
)
2984 z
->ob_size
= -(z
->ob_size
);
2987 return (PyObject
*)z
;
2990 /* The / and % operators are now defined in terms of divmod().
2991 The expression a mod b has the value a - b*floor(a/b).
2992 The long_divrem function gives the remainder after division of
2993 |a| by |b|, with the sign of a. This is also expressed
2994 as a - b*trunc(a/b), if trunc truncates towards zero.
3001 So, to get from rem to mod, we have to add b if a and b
3002 have different signs. We then subtract one from the 'div'
3003 part of the outcome to keep the invariant intact. */
3006 * *pdiv, *pmod = divmod(v, w)
3007 * NULL can be passed for pdiv or pmod, in which case that part of
3008 * the result is simply thrown away. The caller owns a reference to
3009 * each of these it requests (does not pass NULL for).
/* Returns 0 on success, -1 (with an exception set) on failure. */
3012 l_divmod(PyLongObject
*v
, PyLongObject
*w
,
3013 PyLongObject
**pdiv
, PyLongObject
**pmod
)
3015 PyLongObject
*div
, *mod
;
/* long_divrem truncates toward zero and gives mod the sign of v. */
3017 if (long_divrem(v
, w
, &div
, &mod
) < 0)
/* Convert to floor-division semantics: when mod's sign differs from
   w's, do mod += w and div -= 1, which preserves v == w*div + mod. */
3019 if ((Py_SIZE(mod
) < 0 && Py_SIZE(w
) > 0) ||
3020 (Py_SIZE(mod
) > 0 && Py_SIZE(w
) < 0)) {
3023 temp
= (PyLongObject
*) long_add(mod
, w
);
3030 one
= (PyLongObject
*) PyLong_FromLong(1L);
3032 (temp
= (PyLongObject
*) long_sub(div
, one
)) == NULL
) {
/* nb_floor_divide slot: long // long, floor-division quotient only. */
3056 long_div(PyObject
*v
, PyObject
*w
)
3058 PyLongObject
*a
, *b
, *div
;
3060 CONVERT_BINOP(v
, w
, &a
, &b
);
/* Remainder not wanted: pass NULL for pmod. */
3061 if (l_divmod(a
, b
, &div
, NULL
) < 0)
3065 return (PyObject
*)div
;
/* nb_divide slot: classic (Python 2) division. Same quotient as floor
   division, but may raise via PyErr_Warn when -Qwarn makes the
   DeprecationWarning an error. */
3069 long_classic_div(PyObject
*v
, PyObject
*w
)
3071 PyLongObject
*a
, *b
, *div
;
3073 CONVERT_BINOP(v
, w
, &a
, &b
);
3074 if (Py_DivisionWarningFlag
&&
3075 PyErr_Warn(PyExc_DeprecationWarning
, "classic long division") < 0)
3077 else if (l_divmod(a
, b
, &div
, NULL
) < 0)
3081 return (PyObject
*)div
;
3084 /* PyLong/PyLong -> float, with correctly rounded result. */
/* Number of whole digits / leftover bits in DBL_MANT_DIG mantissa bits. */
3086 #define MANT_DIG_DIGITS (DBL_MANT_DIG / PyLong_SHIFT)
3087 #define MANT_DIG_BITS (DBL_MANT_DIG % PyLong_SHIFT)
/* nb_true_divide slot. NOTE(review): several error-path and brace lines
   of this function are not visible in this fragment -- the comments
   below annotate only the visible logic. */
3090 long_true_divide(PyObject
*v
, PyObject
*w
)
3092 PyLongObject
*a
, *b
, *x
;
3093 Py_ssize_t a_size
, b_size
, shift
, extra_bits
, diff
, x_size
, x_bits
;
3095 int inexact
, negate
, a_is_small
, b_is_small
;
3098 CONVERT_BINOP(v
, w
, &a
, &b
);
3101 Method in a nutshell:
3103 0. reduce to case a, b > 0; filter out obvious underflow/overflow
3104 1. choose a suitable integer 'shift'
3105 2. use integer arithmetic to compute x = floor(2**-shift*a/b)
3106 3. adjust x for correct rounding
3107 4. convert x to a double dx with the same value
3108 5. return ldexp(dx, shift).
3112 0. For any a, a/0 raises ZeroDivisionError; for nonzero b, 0/b
3113 returns either 0.0 or -0.0, depending on the sign of b. For a and
3114 b both nonzero, ignore signs of a and b, and add the sign back in
3115 at the end. Now write a_bits and b_bits for the bit lengths of a
3116 and b respectively (that is, a_bits = 1 + floor(log_2(a)); likewise
3119 2**(a_bits - b_bits - 1) < a/b < 2**(a_bits - b_bits + 1).
3121 So if a_bits - b_bits > DBL_MAX_EXP then a/b > 2**DBL_MAX_EXP and
3122 so overflows. Similarly, if a_bits - b_bits < DBL_MIN_EXP -
3123 DBL_MANT_DIG - 1 then a/b underflows to 0. With these cases out of
3124 the way, we can assume that
3126 DBL_MIN_EXP - DBL_MANT_DIG - 1 <= a_bits - b_bits <= DBL_MAX_EXP.
3128 1. The integer 'shift' is chosen so that x has the right number of
3129 bits for a double, plus two or three extra bits that will be used
3130 in the rounding decisions. Writing a_bits and b_bits for the
3131 number of significant bits in a and b respectively, a
3132 straightforward formula for shift is:
3134 shift = a_bits - b_bits - DBL_MANT_DIG - 2
3136 This is fine in the usual case, but if a/b is smaller than the
3137 smallest normal float then it can lead to double rounding on an
3138 IEEE 754 platform, giving incorrectly rounded results. So we
3139 adjust the formula slightly. The actual formula used is:
3141 shift = MAX(a_bits - b_bits, DBL_MIN_EXP) - DBL_MANT_DIG - 2
3143 2. The quantity x is computed by first shifting a (left -shift bits
3144 if shift <= 0, right shift bits if shift > 0) and then dividing by
3145 b. For both the shift and the division, we keep track of whether
3146 the result is inexact, in a flag 'inexact'; this information is
3147 needed at the rounding stage.
3149 With the choice of shift above, together with our assumption that
3150 a_bits - b_bits >= DBL_MIN_EXP - DBL_MANT_DIG - 1, it follows
3153 3. Now x * 2**shift <= a/b < (x+1) * 2**shift. We want to replace
3154 this with an exactly representable float of the form
3156 round(x/2**extra_bits) * 2**(extra_bits+shift).
3158 For float representability, we need x/2**extra_bits <
3159 2**DBL_MANT_DIG and extra_bits + shift >= DBL_MIN_EXP -
3160 DBL_MANT_DIG. This translates to the condition:
3162 extra_bits >= MAX(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG
3164 To round, we just modify the bottom digit of x in-place; this can
3165 end up giving a digit with value > PyLONG_MASK, but that's not a
3166 problem since digits can hold values up to 2*PyLONG_MASK+1.
3168 With the original choices for shift above, extra_bits will always
3169 be 2 or 3. Then rounding under the round-half-to-even rule, we
3170 round up iff the most significant of the extra bits is 1, and
3171 either: (a) the computation of x in step 2 had an inexact result,
3172 or (b) at least one other of the extra bits is 1, or (c) the least
3173 significant bit of x (above those to be rounded) is 1.
3175 4. Conversion to a double is straightforward; all floating-point
3176 operations involved in the conversion are exact, so there's no
3177 danger of rounding errors.
3179 5. Use ldexp(x, shift) to compute x*2**shift, the final result.
3180 The result will always be exactly representable as a double, except
3181 in the case that it overflows. To avoid dependence on the exact
3182 behaviour of ldexp on overflow, we check for overflow before
3183 applying ldexp. The result of ldexp is adjusted for sign before
3187 /* Reduce to case where a and b are both positive. */
3188 a_size
= ABS(Py_SIZE(a
));
3189 b_size
= ABS(Py_SIZE(b
));
/* Result sign: negative iff exactly one operand is negative. */
3190 negate
= (Py_SIZE(a
) < 0) ^ (Py_SIZE(b
) < 0);
3192 PyErr_SetString(PyExc_ZeroDivisionError
,
3193 "division by zero");
3197 goto underflow_or_zero
;
3199 /* Fast path for a and b small (exactly representable in a double).
3200 Relies on floating-point division being correctly rounded; results
3201 may be subject to double rounding on x86 machines that operate with
3202 the x87 FPU set to 64-bit precision. */
3203 a_is_small
= a_size
<= MANT_DIG_DIGITS
||
3204 (a_size
== MANT_DIG_DIGITS
+1 &&
3205 a
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3206 b_is_small
= b_size
<= MANT_DIG_DIGITS
||
3207 (b_size
== MANT_DIG_DIGITS
+1 &&
3208 b
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3209 if (a_is_small
&& b_is_small
) {
/* Accumulate the digits of a and b into exact doubles. */
3211 da
= a
->ob_digit
[--a_size
];
3213 da
= da
* PyLong_BASE
+ a
->ob_digit
[--a_size
];
3214 db
= b
->ob_digit
[--b_size
];
3216 db
= db
* PyLong_BASE
+ b
->ob_digit
[--b_size
];
3221 /* Catch obvious cases of underflow and overflow */
3222 diff
= a_size
- b_size
;
3223 if (diff
> PY_SSIZE_T_MAX
/PyLong_SHIFT
- 1)
3224 /* Extreme overflow */
3226 else if (diff
< 1 - PY_SSIZE_T_MAX
/PyLong_SHIFT
)
3227 /* Extreme underflow */
3228 goto underflow_or_zero
;
3229 /* Next line is now safe from overflowing a Py_ssize_t */
3230 diff
= diff
* PyLong_SHIFT
+ bits_in_digit(a
->ob_digit
[a_size
- 1]) -
3231 bits_in_digit(b
->ob_digit
[b_size
- 1]);
3232 /* Now diff = a_bits - b_bits. */
3233 if (diff
> DBL_MAX_EXP
)
3235 else if (diff
< DBL_MIN_EXP
- DBL_MANT_DIG
- 1)
3236 goto underflow_or_zero
;
3238 /* Choose value for shift; see comments for step 1 above. */
3239 shift
= MAX(diff
, DBL_MIN_EXP
) - DBL_MANT_DIG
- 2;
3243 /* x = abs(a * 2**-shift) */
3245 Py_ssize_t i
, shift_digits
= -shift
/ PyLong_SHIFT
;
3247 /* x = a << -shift */
3248 if (a_size
>= PY_SSIZE_T_MAX
- 1 - shift_digits
) {
3249 /* In practice, it's probably impossible to end up
3250 here. Both a and b would have to be enormous,
3251 using close to SIZE_T_MAX bytes of memory each. */
3252 PyErr_SetString(PyExc_OverflowError
,
3253 "intermediate overflow during division");
3256 x
= _PyLong_New(a_size
+ shift_digits
+ 1);
3259 for (i
= 0; i
< shift_digits
; i
++)
3261 rem
= v_lshift(x
->ob_digit
+ shift_digits
, a
->ob_digit
,
3262 a_size
, -shift
% PyLong_SHIFT
);
3263 x
->ob_digit
[a_size
+ shift_digits
] = rem
;
3266 Py_ssize_t shift_digits
= shift
/ PyLong_SHIFT
;
3268 /* x = a >> shift */
3269 assert(a_size
>= shift_digits
);
3270 x
= _PyLong_New(a_size
- shift_digits
);
3273 rem
= v_rshift(x
->ob_digit
, a
->ob_digit
+ shift_digits
,
3274 a_size
- shift_digits
, shift
% PyLong_SHIFT
);
3275 /* set inexact if any of the bits shifted out is nonzero */
3278 while (!inexact
&& shift_digits
> 0)
3279 if (a
->ob_digit
[--shift_digits
])
3283 x_size
= Py_SIZE(x
);
3285 /* x //= b. If the remainder is nonzero, set inexact. We own the only
3286 reference to x, so it's safe to modify it in-place. */
3288 digit rem
= inplace_divrem1(x
->ob_digit
, x
->ob_digit
, x_size
,
3295 PyLongObject
*div
, *rem
;
3296 div
= x_divrem(x
, b
, &rem
);
3305 x_size
= ABS(Py_SIZE(x
));
3306 assert(x_size
> 0); /* result of division is never zero */
3307 x_bits
= (x_size
-1)*PyLong_SHIFT
+bits_in_digit(x
->ob_digit
[x_size
-1]);
3309 /* The number of extra bits that have to be rounded away. */
3310 extra_bits
= MAX(x_bits
, DBL_MIN_EXP
- shift
) - DBL_MANT_DIG
;
3311 assert(extra_bits
== 2 || extra_bits
== 3);
3313 /* Round by directly modifying the low digit of x. */
3314 mask
= (digit
)1 << (extra_bits
- 1);
/* Folding 'inexact' into the low bit implements the sticky bit for
   round-half-to-even. */
3315 low
= x
->ob_digit
[0] | inexact
;
3316 if (low
& mask
&& low
& (3*mask
-1))
3318 x
->ob_digit
[0] = low
& ~(mask
-1U);
3320 /* Convert x to a double dx; the conversion is exact. */
3321 dx
= x
->ob_digit
[--x_size
];
3323 dx
= dx
* PyLong_BASE
+ x
->ob_digit
[--x_size
];
3326 /* Check whether ldexp result will overflow a double. */
3327 if (shift
+ x_bits
>= DBL_MAX_EXP
&&
3328 (shift
+ x_bits
> DBL_MAX_EXP
|| dx
== ldexp(1.0, (int)x_bits
)))
3330 result
= ldexp(dx
, (int)shift
);
3335 return PyFloat_FromDouble(negate
? -result
: result
);
3340 return PyFloat_FromDouble(negate
? -0.0 : 0.0);
3343 PyErr_SetString(PyExc_OverflowError
,
3344 "integer division result too large for a float");
/* nb_remainder slot: long % long. Result has the sign of the divisor
   (floor-division remainder, via l_divmod). */
3352 long_mod(PyObject
*v
, PyObject
*w
)
3354 PyLongObject
*a
, *b
, *mod
;
3356 CONVERT_BINOP(v
, w
, &a
, &b
);
/* Quotient not wanted: pass NULL for pdiv. */
3358 if (l_divmod(a
, b
, NULL
, &mod
) < 0)
3362 return (PyObject
*)mod
;
/* nb_divmod slot: divmod(long, long) -> (quotient, remainder) tuple. */
3366 long_divmod(PyObject
*v
, PyObject
*w
)
3368 PyLongObject
*a
, *b
, *div
, *mod
;
3371 CONVERT_BINOP(v
, w
, &a
, &b
);
3373 if (l_divmod(a
, b
, &div
, &mod
) < 0) {
/* PyTuple_SetItem steals the div and mod references. */
3380 PyTuple_SetItem(z
, 0, (PyObject
*) div
);
3381 PyTuple_SetItem(z
, 1, (PyObject
*) mod
);
/* nb_power slot: pow(v, w[, x]). Binary exponentiation for small
   exponents, 5-ary windowed exponentiation above FIVEARY_CUTOFF.
   NOTE(review): many error-handling and brace lines are not visible in
   this fragment; comments annotate only the visible logic. */
3394 long_pow(PyObject
*v
, PyObject
*w
, PyObject
*x
)
3396 PyLongObject
*a
, *b
, *c
; /* a,b,c = v,w,x */
3397 int negativeOutput
= 0; /* if x<0 return negative output */
3399 PyLongObject
*z
= NULL
; /* accumulated result */
3400 Py_ssize_t i
, j
, k
; /* counters */
3401 PyLongObject
*temp
= NULL
;
3403 /* 5-ary values. If the exponent is large enough, table is
3404 * precomputed so that table[i] == a**i % c for i in range(32).
3406 PyLongObject
*table
[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3407 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3409 /* a, b, c = v, w, x */
3410 CONVERT_BINOP(v
, w
, &a
, &b
);
3411 if (PyLong_Check(x
)) {
3412 c
= (PyLongObject
*)x
;
3415 else if (PyInt_Check(x
)) {
3416 c
= (PyLongObject
*)PyLong_FromLong(PyInt_AS_LONG(x
));
3420 else if (x
== Py_None
)
3425 Py_INCREF(Py_NotImplemented
);
3426 return Py_NotImplemented
;
3429 if (Py_SIZE(b
) < 0) { /* if exponent is negative */
3431 PyErr_SetString(PyExc_TypeError
, "pow() 2nd argument "
3432 "cannot be negative when 3rd argument specified");
3436 /* else return a float. This works because we know
3437 that this calls float_pow() which converts its
3438 arguments to double. */
3441 return PyFloat_Type
.tp_as_number
->nb_power(v
, w
, x
);
3447 raise ValueError() */
3448 if (Py_SIZE(c
) == 0) {
3449 PyErr_SetString(PyExc_ValueError
,
3450 "pow() 3rd argument cannot be 0");
3455 negativeOutput = True
3456 modulus = -modulus */
3457 if (Py_SIZE(c
) < 0) {
/* Work on a private copy so the caller's modulus is not mutated. */
3459 temp
= (PyLongObject
*)_PyLong_Copy(c
);
3465 c
->ob_size
= - c
->ob_size
;
/* Modulus 1: the answer is always 0. */
3470 if ((Py_SIZE(c
) == 1) && (c
->ob_digit
[0] == 1)) {
3471 z
= (PyLongObject
*)PyLong_FromLong(0L);
3476 base = base % modulus
3477 Having the base positive just makes things easier. */
3478 if (Py_SIZE(a
) < 0) {
3479 if (l_divmod(a
, c
, NULL
, &temp
) < 0)
3487 /* At this point a, b, and c are guaranteed non-negative UNLESS
3488 c is NULL, in which case a may be negative. */
3490 z
= (PyLongObject
*)PyLong_FromLong(1L);
3494 /* Perform a modular reduction, X = X % c, but leave X alone if c
3500 if (l_divmod(X, c, NULL, &temp) < 0) \
3508 /* Multiply two values, then reduce the result:
3509 result = X*Y % c. If c is NULL, skip the mod. */
3510 #define MULT(X, Y, result) \
3512 temp = (PyLongObject *)long_mul(X, Y); \
3515 Py_XDECREF(result); \
3521 if (Py_SIZE(b
) <= FIVEARY_CUTOFF
) {
3522 /* Left-to-right binary exponentiation (HAC Algorithm 14.79) */
3523 /* http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf */
3524 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3525 digit bi
= b
->ob_digit
[i
];
/* Scan exponent digit bits from most significant downward. */
3527 for (j
= (digit
)1 << (PyLong_SHIFT
-1); j
!= 0; j
>>= 1) {
3535 /* Left-to-right 5-ary exponentiation (HAC Algorithm 14.82) */
3536 Py_INCREF(z
); /* still holds 1L */
3538 for (i
= 1; i
< 32; ++i
)
3539 MULT(table
[i
-1], a
, table
[i
]);
3541 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3542 const digit bi
= b
->ob_digit
[i
];
/* Consume the exponent five bits at a time: square five times,
   then multiply by the precomputed power for this window. */
3544 for (j
= PyLong_SHIFT
- 5; j
>= 0; j
-= 5) {
3545 const int index
= (bi
>> j
) & 0x1f;
3546 for (k
= 0; k
< 5; ++k
)
3549 MULT(z
, table
[index
], z
);
/* Map result into the negative range when the modulus was negative. */
3554 if (negativeOutput
&& (Py_SIZE(z
) != 0)) {
3555 temp
= (PyLongObject
*)long_sub(z
, c
);
3571 if (Py_SIZE(b
) > FIVEARY_CUTOFF
) {
3572 for (i
= 0; i
< 32; ++i
)
3573 Py_XDECREF(table
[i
]);
3579 return (PyObject
*)z
;
/* nb_invert slot. */
3583 long_invert(PyLongObject
*v
)
3585 /* Implement ~x as -(x+1) */
3588 w
= (PyLongObject
*)PyLong_FromLong(1L);
3591 x
= (PyLongObject
*) long_add(v
, w
);
/* x is freshly created, so negating its size in place is safe. */
3595 Py_SIZE(x
) = -(Py_SIZE(x
));
3596 return (PyObject
*)x
;
/* nb_negative slot: unary minus. */
3600 long_neg(PyLongObject
*v
)
/* -0 is 0: return the operand itself when it is an exact long. */
3603 if (v
->ob_size
== 0 && PyLong_CheckExact(v
)) {
3606 return (PyObject
*) v
;
/* Otherwise negate a fresh copy; the argument is never mutated. */
3608 z
= (PyLongObject
*)_PyLong_Copy(v
);
3610 z
->ob_size
= -(v
->ob_size
);
3611 return (PyObject
*)z
;
/* nb_absolute slot. Visible path returns the value unchanged via
   long_long; NOTE(review): the branch handling negative input is not
   visible in this fragment. */
3615 long_abs(PyLongObject
*v
)
3620 return long_long((PyObject
*)v
);
/* nb_nonzero slot: a long is true iff it has at least one digit. */
3624 long_nonzero(PyLongObject
*v
)
3626 return Py_SIZE(v
) != 0;
/* nb_rshift slot: arithmetic right shift. */
3630 long_rshift(PyLongObject
*v
, PyLongObject
*w
)
3632 PyLongObject
*a
, *b
;
3633 PyLongObject
*z
= NULL
;
3634 Py_ssize_t shiftby
, newsize
, wordshift
, loshift
, hishift
, i
, j
;
3635 digit lomask
, himask
;
3637 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
3639 if (Py_SIZE(a
) < 0) {
3640 /* Right shifting negative numbers is harder */
/* Use the identity a >> n == ~(~a >> n) to reduce to the
   non-negative case. */
3641 PyLongObject
*a1
, *a2
;
3642 a1
= (PyLongObject
*) long_invert(a
);
3645 a2
= (PyLongObject
*) long_rshift(a1
, b
);
3649 z
= (PyLongObject
*) long_invert(a2
);
3653 shiftby
= PyLong_AsSsize_t((PyObject
*)b
);
3654 if (shiftby
== -1L && PyErr_Occurred())
3657 PyErr_SetString(PyExc_ValueError
,
3658 "negative shift count");
/* Split the shift into whole digits plus a residual bit count. */
3661 wordshift
= shiftby
/ PyLong_SHIFT
;
3662 newsize
= ABS(Py_SIZE(a
)) - wordshift
;
3667 return (PyObject
*)z
;
3669 loshift
= shiftby
% PyLong_SHIFT
;
3670 hishift
= PyLong_SHIFT
- loshift
;
3671 lomask
= ((digit
)1 << hishift
) - 1;
3672 himask
= PyLong_MASK
^ lomask
;
3673 z
= _PyLong_New(newsize
);
3677 Py_SIZE(z
) = -(Py_SIZE(z
));
/* Each result digit combines the shifted-down low bits of a[j]
   with the high bits borrowed from a[j+1]. */
3678 for (i
= 0, j
= wordshift
; i
< newsize
; i
++, j
++) {
3679 z
->ob_digit
[i
] = (a
->ob_digit
[j
] >> loshift
) & lomask
;
3681 z
->ob_digit
[i
] |= (a
->ob_digit
[j
+1] << hishift
) & himask
;
3683 z
= long_normalize(z
);
3688 return (PyObject
*) z
;
/* nb_lshift slot: left shift. */
3693 long_lshift(PyObject
*v
, PyObject
*w
)
3695 /* This version due to Tim Peters */
3696 PyLongObject
*a
, *b
;
3697 PyLongObject
*z
= NULL
;
3698 Py_ssize_t shiftby
, oldsize
, newsize
, wordshift
, remshift
, i
, j
;
3701 CONVERT_BINOP(v
, w
, &a
, &b
);
3703 shiftby
= PyLong_AsSsize_t((PyObject
*)b
);
3704 if (shiftby
== -1L && PyErr_Occurred())
3707 PyErr_SetString(PyExc_ValueError
, "negative shift count");
3710 /* wordshift, remshift = divmod(shiftby, PyLong_SHIFT) */
3711 wordshift
= shiftby
/ PyLong_SHIFT
;
3712 remshift
= shiftby
- wordshift
* PyLong_SHIFT
;
3714 oldsize
= ABS(a
->ob_size
);
3715 newsize
= oldsize
+ wordshift
;
3718 z
= _PyLong_New(newsize
);
/* Preserve the operand's sign on the freshly allocated result. */
3722 z
->ob_size
= -(z
->ob_size
);
3723 for (i
= 0; i
< wordshift
; i
++)
/* Propagate digits upward, carrying overflow bits through accum. */
3726 for (i
= wordshift
, j
= 0; j
< oldsize
; i
++, j
++) {
3727 accum
|= (twodigits
)a
->ob_digit
[j
] << remshift
;
3728 z
->ob_digit
[i
] = (digit
)(accum
& PyLong_MASK
);
3729 accum
>>= PyLong_SHIFT
;
3732 z
->ob_digit
[newsize
-1] = (digit
)accum
;
3735 z
= long_normalize(z
);
3739 return (PyObject
*) z
;
3742 /* Compute two's complement of digit vector a[0:m], writing result to
3743 z[0:m]. The digit vector a need not be normalized, but should not
3744 be entirely zero. a and z may point to the same digit vector. */
3747 v_complement(digit
*z
, digit
*a
, Py_ssize_t m
)
/* ~a[i] + carry, digit by digit, keeping each result masked to one
   digit and propagating the carry upward. NOTE(review): the carry's
   initial value (seeding the "+1") is not visible in this fragment. */
3751 for (i
= 0; i
< m
; ++i
) {
3752 carry
+= a
[i
] ^ PyLong_MASK
;
3753 z
[i
] = carry
& PyLong_MASK
;
3754 carry
>>= PyLong_SHIFT
;
3759 /* Bitwise and/xor/or operations */
/* Shared worker for &, | and ^; op selects the operation. */
3762 long_bitwise(PyLongObject
*a
,
3763 int op
, /* '&', '|', '^' */
3766 int nega
, negb
, negz
;
3767 Py_ssize_t size_a
, size_b
, size_z
, i
;
3770 /* Bitwise operations for negative numbers operate as though
3771 on a two's complement representation. So convert arguments
3772 from sign-magnitude to two's complement, and convert the
3773 result back to sign-magnitude at the end. */
3775 /* If a is negative, replace it by its two's complement. */
3776 size_a
= ABS(Py_SIZE(a
));
3777 nega
= Py_SIZE(a
) < 0;
3779 z
= _PyLong_New(size_a
);
3782 v_complement(z
->ob_digit
, a
->ob_digit
, size_a
);
3786 /* Keep reference count consistent. */
/* Same two's-complement treatment for b. */
3790 size_b
= ABS(Py_SIZE(b
));
3791 negb
= Py_SIZE(b
) < 0;
3793 z
= _PyLong_New(size_b
);
3798 v_complement(z
->ob_digit
, b
->ob_digit
, size_b
);
3804 /* Swap a and b if necessary to ensure size_a >= size_b. */
3805 if (size_a
< size_b
) {
3806 z
= a
; a
= b
; b
= z
;
3807 size_z
= size_a
; size_a
= size_b
; size_b
= size_z
;
3808 negz
= nega
; nega
= negb
; negb
= negz
;
3811 /* JRH: The original logic here was to allocate the result value (z)
3812 as the longer of the two operands. However, there are some cases
3813 where the result is guaranteed to be shorter than that: AND of two
3814 positives, OR of two negatives: use the shorter number. AND with
3815 mixed signs: use the positive number. OR with mixed signs: use the
3825 size_z
= negb
? size_a
: size_b
;
3829 size_z
= negb
? size_b
: size_a
;
3832 PyErr_BadArgument();
3836 /* We allow an extra digit if z is negative, to make sure that
3837 the final two's complement of z doesn't overflow. */
3838 z
= _PyLong_New(size_z
+ negz
);
3845 /* Compute digits for overlap of a and b. */
3848 for (i
= 0; i
< size_b
; ++i
)
3849 z
->ob_digit
[i
] = a
->ob_digit
[i
] & b
->ob_digit
[i
];
3852 for (i
= 0; i
< size_b
; ++i
)
3853 z
->ob_digit
[i
] = a
->ob_digit
[i
] | b
->ob_digit
[i
];
3856 for (i
= 0; i
< size_b
; ++i
)
3857 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ b
->ob_digit
[i
];
3860 PyErr_BadArgument();
3864 /* Copy any remaining digits of a, inverting if necessary. */
/* XOR against a negative b behaves like XOR with an infinite run of
   one bits, hence the PyLong_MASK inversion of the tail digits. */
3865 if (op
== '^' && negb
)
3866 for (; i
< size_z
; ++i
)
3867 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ PyLong_MASK
;
3868 else if (i
< size_z
)
3869 memcpy(&z
->ob_digit
[i
], &a
->ob_digit
[i
],
3870 (size_z
-i
)*sizeof(digit
));
3872 /* Complement result if negative. */
3874 Py_SIZE(z
) = -(Py_SIZE(z
));
3875 z
->ob_digit
[size_z
] = PyLong_MASK
;
3876 v_complement(z
->ob_digit
, z
->ob_digit
, size_z
+1);
3881 return (PyObject
*)long_normalize(z
);
/* nb_and slot: delegate to the sign-aware long_bitwise helper. */
3885 long_and(PyObject
*v
, PyObject
*w
)
3887 PyLongObject
*a
, *b
;
3889 CONVERT_BINOP(v
, w
, &a
, &b
);
3890 c
= long_bitwise(a
, '&', b
);
/* nb_xor slot: delegate to the sign-aware long_bitwise helper. */
3897 long_xor(PyObject
*v
, PyObject
*w
)
3899 PyLongObject
*a
, *b
;
3901 CONVERT_BINOP(v
, w
, &a
, &b
);
3902 c
= long_bitwise(a
, '^', b
);
/* nb_or slot: delegate to the sign-aware long_bitwise helper. */
3909 long_or(PyObject
*v
, PyObject
*w
)
3911 PyLongObject
*a
, *b
;
3913 CONVERT_BINOP(v
, w
, &a
, &b
);
3914 c
= long_bitwise(a
, '|', b
);
/* nb_coerce slot: promote a Python int second operand to a long.
   Returns 1 ("can't do it") when *pw is neither int nor long. */
3921 long_coerce(PyObject
**pv
, PyObject
**pw
)
3923 if (PyInt_Check(*pw
)) {
3924 *pw
= PyLong_FromLong(PyInt_AS_LONG(*pw
));
3930 else if (PyLong_Check(*pw
)) {
3935 return 1; /* Can't do it */
/* nb_long slot: exact longs pass through; subclass instances are
   copied into a true long. */
3939 long_long(PyObject
*v
)
3941 if (PyLong_CheckExact(v
))
3944 v
= _PyLong_Copy((PyLongObject
*)v
);
/* nb_int slot: produce a machine int when the value fits, otherwise
   keep it as a long. */
3949 long_int(PyObject
*v
)
3952 x
= PyLong_AsLong(v
);
3953 if (PyErr_Occurred()) {
/* Overflow is not an error here: fall back to returning a long. */
3954 if (PyErr_ExceptionMatches(PyExc_OverflowError
)) {
3956 if (PyLong_CheckExact(v
)) {
3961 return _PyLong_Copy((PyLongObject
*)v
);
3966 return PyInt_FromLong(x
);
/* nb_float slot. */
3970 long_float(PyObject
*v
)
3973 result
= PyLong_AsDouble(v
);
/* -1.0 doubles as the error sentinel; a pending exception
   distinguishes failure from a genuine -1.0. */
3974 if (result
== -1.0 && PyErr_Occurred())
3976 return PyFloat_FromDouble(result
);
/* nb_oct slot: base-8 string form. */
3980 long_oct(PyObject
*v
)
3982 return _PyLong_Format(v
, 8, 1, 0);
/* nb_hex slot: base-16 string form. */
3986 long_hex(PyObject
*v
)
3988 return _PyLong_Format(v
, 16, 1, 0);
/* Forward declaration; defined below. */
3992 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
);
/* tp_new for long: handles long(), long(x), and long(x, base). */
3995 long_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
3998 int base
= -909; /* unlikely! */
3999 static char *kwlist
[] = {"x", "base", 0};
4001 if (type
!= &PyLong_Type
)
4002 return long_subtype_new(type
, args
, kwds
); /* Wimp out */
4003 if (!PyArg_ParseTupleAndKeywords(args
, kwds
, "|Oi:long", kwlist
,
4007 return PyLong_FromLong(0L);
4009 return PyNumber_Long(x
);
4010 else if (PyString_Check(x
)) {
4011 /* Since PyLong_FromString doesn't have a length parameter,
4012 * check here for possible NULs in the string. */
4013 char *string
= PyString_AS_STRING(x
);
/* An embedded NUL makes strlen shorter than the string size. */
4014 if (strlen(string
) != (size_t)PyString_Size(x
)) {
4015 /* create a repr() of the input string,
4016 * just like PyLong_FromString does. */
4018 srepr
= PyObject_Repr(x
);
4021 PyErr_Format(PyExc_ValueError
,
4022 "invalid literal for long() with base %d: %s",
4023 base
, PyString_AS_STRING(srepr
));
4027 return PyLong_FromString(PyString_AS_STRING(x
), NULL
, base
);
4029 #ifdef Py_USING_UNICODE
4030 else if (PyUnicode_Check(x
))
4031 return PyLong_FromUnicode(PyUnicode_AS_UNICODE(x
),
4032 PyUnicode_GET_SIZE(x
),
4036 PyErr_SetString(PyExc_TypeError
,
4037 "long() can't convert non-string with explicit base");
4042 /* Wimpy, slow approach to tp_new calls for subtypes of long:
4043 first create a regular long from whatever arguments we got,
4044 then allocate a subtype instance and initialize it from
4045 the regular long. The regular long is then thrown away.
4048 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
4050 PyLongObject
*tmp
, *newobj
;
4053 assert(PyType_IsSubtype(type
, &PyLong_Type
));
4054 tmp
= (PyLongObject
*)long_new(&PyLong_Type
, args
, kwds
);
4057 assert(PyLong_CheckExact(tmp
));
4061 newobj
= (PyLongObject
*)type
->tp_alloc(type
, n
);
4062 if (newobj
== NULL
) {
4066 assert(PyLong_Check(newobj
));
/* Copy the size (which carries the sign) and all digits over. */
4067 Py_SIZE(newobj
) = Py_SIZE(tmp
);
4068 for (i
= 0; i
< n
; i
++)
4069 newobj
->ob_digit
[i
] = tmp
->ob_digit
[i
];
4071 return (PyObject
*)newobj
;
/* __getnewargs__ (pickle support): a 1-tuple containing a copy of v. */
4075 long_getnewargs(PyLongObject
*v
)
4077 return Py_BuildValue("(N)", _PyLong_Copy(v
));
/* Getter returning the constant 0 (used for the imag attribute). */
4081 long_get0(PyLongObject
*v
, void *context
) {
4082 return PyLong_FromLong(0L);
/* Getter returning the constant 1 (used for the denominator attribute). */
4086 long_get1(PyLongObject
*v
, void *context
) {
4087 return PyLong_FromLong(1L);
/* __format__(format_spec): accepts a bytes/str or unicode spec and
   delegates to _PyLong_FormatAdvanced. */
4091 long__format__(PyObject
*self
, PyObject
*args
)
4093 PyObject
*format_spec
;
4095 if (!PyArg_ParseTuple(args
, "O:__format__", &format_spec
))
4097 if (PyBytes_Check(format_spec
))
4098 return _PyLong_FormatAdvanced(self
,
4099 PyBytes_AS_STRING(format_spec
),
4100 PyBytes_GET_SIZE(format_spec
));
4101 if (PyUnicode_Check(format_spec
)) {
4102 /* Convert format_spec to a str */
4104 PyObject
*str_spec
= PyObject_Str(format_spec
);
4106 if (str_spec
== NULL
)
4109 result
= _PyLong_FormatAdvanced(self
,
4110 PyBytes_AS_STRING(str_spec
),
4111 PyBytes_GET_SIZE(str_spec
));
4113 Py_DECREF(str_spec
);
4116 PyErr_SetString(PyExc_TypeError
, "__format__ requires str or unicode");
/* __sizeof__: object header plus one digit slot per stored digit. */
4121 long_sizeof(PyLongObject
*v
)
4125 res
= v
->ob_type
->tp_basicsize
+ ABS(Py_SIZE(v
))*sizeof(digit
);
4126 return PyInt_FromSsize_t(res
);
/* bit_length(): number of bits needed to represent |v| in binary. */
4130 long_bit_length(PyLongObject
*v
)
4132 PyLongObject
*result
, *x
, *y
;
4133 Py_ssize_t ndigits
, msd_bits
= 0;
4137 assert(PyLong_Check(v
));
4139 ndigits
= ABS(Py_SIZE(v
));
4141 return PyInt_FromLong(0);
4143 msd
= v
->ob_digit
[ndigits
-1];
/* Bit count of the most significant digit, via the lookup table. */
4148 msd_bits
+= (long)(BitLengthTable
[msd
]);
/* Common case: (ndigits-1)*PyLong_SHIFT + msd_bits fits in a
   Py_ssize_t, so plain C arithmetic suffices. */
4150 if (ndigits
<= PY_SSIZE_T_MAX
/PyLong_SHIFT
)
4151 return PyInt_FromSsize_t((ndigits
-1)*PyLong_SHIFT
+ msd_bits
);
4153 /* expression above may overflow; use Python integers instead */
4154 result
= (PyLongObject
*)PyLong_FromSsize_t(ndigits
- 1);
4157 x
= (PyLongObject
*)PyLong_FromLong(PyLong_SHIFT
);
4160 y
= (PyLongObject
*)long_mul(result
, x
);
4167 x
= (PyLongObject
*)PyLong_FromLong((long)msd_bits
);
4170 y
= (PyLongObject
*)long_add(result
, x
);
4177 return (PyObject
*)result
;
/* Docstring for long.bit_length(). */
4184 PyDoc_STRVAR(long_bit_length_doc
,
4185 "long.bit_length() -> int or long\n\
4187 Number of bits necessary to represent self in binary.\n\
4190 >>> (37L).bit_length()\n\
/* is_finite(): longs are always finite per the method table docstring
   below. NOTE(review): the function body is not visible in this
   fragment. */
4195 long_is_finite(PyObject
*v
)
/* Method table for the long type. */
4201 static PyMethodDef long_methods
[] = {
4202 {"conjugate", (PyCFunction
)long_long
, METH_NOARGS
,
4203 "Returns self, the complex conjugate of any long."},
4204 {"bit_length", (PyCFunction
)long_bit_length
, METH_NOARGS
,
4205 long_bit_length_doc
},
4207 {"is_finite", (PyCFunction
)long_is_finite
, METH_NOARGS
,
4208 "Returns always True."},
4210 {"__trunc__", (PyCFunction
)long_long
, METH_NOARGS
,
4211 "Truncating an Integral returns itself."},
4212 {"__getnewargs__", (PyCFunction
)long_getnewargs
, METH_NOARGS
},
4213 {"__format__", (PyCFunction
)long__format__
, METH_VARARGS
},
4214 {"__sizeof__", (PyCFunction
)long_sizeof
, METH_NOARGS
,
4215 "Returns size in memory, in bytes"},
4216 {NULL
, NULL
} /* sentinel */
/* Read-only numeric-tower attributes: real/imag/numerator/denominator.
   NOTE(review): the attribute-name lines of each entry are not visible
   in this fragment. */
4219 static PyGetSetDef long_getset
[] = {
4221 (getter
)long_long
, (setter
)NULL
,
4222 "the real part of a complex number",
4225 (getter
)long_get0
, (setter
)NULL
,
4226 "the imaginary part of a complex number",
4229 (getter
)long_long
, (setter
)NULL
,
4230 "the numerator of a rational number in lowest terms",
4233 (getter
)long_get1
, (setter
)NULL
,
4234 "the denominator of a rational number in lowest terms",
4236 {NULL
} /* Sentinel */
/* Docstring for the long type itself. */
4239 PyDoc_STRVAR(long_doc
,
4240 "long(x[, base]) -> integer\n\
4242 Convert a string or number to a long integer, if possible. A floating\n\
4243 point argument will be truncated towards zero (this does not include a\n\
4244 string representation of a floating point number!) When converting a\n\
4245 string, use the optional base. It is an error to supply a base when\n\
4246 converting a non-string.");
4248 static PyNumberMethods long_as_number
= {
4249 (binaryfunc
)long_add
, /*nb_add*/
4250 (binaryfunc
)long_sub
, /*nb_subtract*/
4251 (binaryfunc
)long_mul
, /*nb_multiply*/
4252 long_classic_div
, /*nb_divide*/
4253 long_mod
, /*nb_remainder*/
4254 long_divmod
, /*nb_divmod*/
4255 long_pow
, /*nb_power*/
4256 (unaryfunc
)long_neg
, /*nb_negative*/
4257 (unaryfunc
)long_long
, /*tp_positive*/
4258 (unaryfunc
)long_abs
, /*tp_absolute*/
4259 (inquiry
)long_nonzero
, /*tp_nonzero*/
4260 (unaryfunc
)long_invert
, /*nb_invert*/
4261 long_lshift
, /*nb_lshift*/
4262 (binaryfunc
)long_rshift
, /*nb_rshift*/
4263 long_and
, /*nb_and*/
4264 long_xor
, /*nb_xor*/
4266 long_coerce
, /*nb_coerce*/
4267 long_int
, /*nb_int*/
4268 long_long
, /*nb_long*/
4269 long_float
, /*nb_float*/
4270 long_oct
, /*nb_oct*/
4271 long_hex
, /*nb_hex*/
4272 0, /* nb_inplace_add */
4273 0, /* nb_inplace_subtract */
4274 0, /* nb_inplace_multiply */
4275 0, /* nb_inplace_divide */
4276 0, /* nb_inplace_remainder */
4277 0, /* nb_inplace_power */
4278 0, /* nb_inplace_lshift */
4279 0, /* nb_inplace_rshift */
4280 0, /* nb_inplace_and */
4281 0, /* nb_inplace_xor */
4282 0, /* nb_inplace_or */
4283 long_div
, /* nb_floor_divide */
4284 long_true_divide
, /* nb_true_divide */
4285 0, /* nb_inplace_floor_divide */
4286 0, /* nb_inplace_true_divide */
4287 long_long
, /* nb_index */
4290 PyTypeObject PyLong_Type
= {
4291 PyObject_HEAD_INIT(&PyType_Type
)
4293 "long", /* tp_name */
4294 offsetof(PyLongObject
, ob_digit
), /* tp_basicsize */
4295 sizeof(digit
), /* tp_itemsize */
4296 long_dealloc
, /* tp_dealloc */
4300 (cmpfunc
)long_compare
, /* tp_compare */
4301 long_repr
, /* tp_repr */
4302 &long_as_number
, /* tp_as_number */
4303 0, /* tp_as_sequence */
4304 0, /* tp_as_mapping */
4305 (hashfunc
)long_hash
, /* tp_hash */
4307 long_str
, /* tp_str */
4308 PyObject_GenericGetAttr
, /* tp_getattro */
4309 0, /* tp_setattro */
4310 0, /* tp_as_buffer */
4311 Py_TPFLAGS_DEFAULT
| Py_TPFLAGS_CHECKTYPES
|
4312 Py_TPFLAGS_BASETYPE
| Py_TPFLAGS_LONG_SUBCLASS
, /* tp_flags */
4313 long_doc
, /* tp_doc */
4314 0, /* tp_traverse */
4316 0, /* tp_richcompare */
4317 0, /* tp_weaklistoffset */
4319 0, /* tp_iternext */
4320 long_methods
, /* tp_methods */
4322 long_getset
, /* tp_getset */
4325 0, /* tp_descr_get */
4326 0, /* tp_descr_set */
4327 0, /* tp_dictoffset */
4330 long_new
, /* tp_new */
4331 PyObject_Del
, /* tp_free */
4334 static PyTypeObject Long_InfoType
;
4336 PyDoc_STRVAR(long_info__doc__
,
4339 A struct sequence that holds information about Python's\n\
4340 internal representation of integers. The attributes are read only.");
4342 static PyStructSequence_Field long_info_fields
[] = {
4343 {"bits_per_digit", "size of a digit in bits"},
4344 {"sizeof_digit", "size in bytes of the C type used to represent a digit"},
4348 static PyStructSequence_Desc long_info_desc
= {
4349 "sys.long_info", /* name */
4350 long_info__doc__
, /* doc */
4351 long_info_fields
, /* fields */
4352 2 /* number of fields */
4356 PyLong_GetInfo(void)
4358 PyObject
* long_info
;
4360 long_info
= PyStructSequence_New(&Long_InfoType
);
4361 if (long_info
== NULL
)
4363 PyStructSequence_SET_ITEM(long_info
, field
++,
4364 PyInt_FromLong(PyLong_SHIFT
));
4365 PyStructSequence_SET_ITEM(long_info
, field
++,
4366 PyInt_FromLong(sizeof(digit
)));
4367 if (PyErr_Occurred()) {
4368 Py_CLEAR(long_info
);
4377 /* initialize long_info */
4378 if (Long_InfoType
.tp_name
== 0)
4379 PyStructSequence_InitType(&Long_InfoType
, &long_info_desc
);