1 /* Long (arbitrary precision) integer object implementation */
3 /* XXX The functional organization of this file is terrible */
6 #include "longintrepr.h"
/* For long multiplication, use the O(N**2) school algorithm unless
 * both operands contain more than KARATSUBA_CUTOFF digits (this
 * being an internal Python long digit, in base PyLong_BASE).
 */
#define KARATSUBA_CUTOFF 70
#define KARATSUBA_SQUARE_CUTOFF (2 * KARATSUBA_CUTOFF)

/* For exponentiation, use the binary left-to-right algorithm
 * unless the exponent contains more than FIVEARY_CUTOFF digits.
 * In that case, do 5 bits at a time.  The potential drawback is that
 * a table of 2**5 intermediate results is computed.
 */
#define FIVEARY_CUTOFF 8

/* NOTE: these evaluate their arguments more than once; never pass
   expressions with side effects. */
#define ABS(x) ((x) < 0 ? -(x) : (x))

#undef MIN
#undef MAX
#define MAX(x, y) ((x) < (y) ? (y) : (x))
#define MIN(x, y) ((x) > (y) ? (y) : (x))

/* Periodically check for pending signals inside long-running loops;
   PyTryBlock is the cleanup/return code run if a signal handler raised. */
#define SIGCHECK(PyTryBlock)                            \
    do {                                                \
        if (--_Py_Ticker < 0) {                         \
            _Py_Ticker = _Py_CheckInterval;             \
            if (PyErr_CheckSignals()) PyTryBlock        \
        }                                               \
    } while(0)
47 /* Normalize (remove leading zeros from) a long int object.
48 Doesn't attempt to free the storage--in most cases, due to the nature
49 of the algorithms used, this could save at most be one word anyway. */
52 long_normalize(register PyLongObject
*v
)
54 Py_ssize_t j
= ABS(Py_SIZE(v
));
57 while (i
> 0 && v
->ob_digit
[i
-1] == 0)
60 Py_SIZE(v
) = (Py_SIZE(v
) < 0) ? -(i
) : i
;
64 /* Allocate a new long int object with size digits.
65 Return NULL and set exception if we run out of memory. */
67 #define MAX_LONG_DIGITS \
68 ((PY_SSIZE_T_MAX - offsetof(PyLongObject, ob_digit))/sizeof(digit))
71 _PyLong_New(Py_ssize_t size
)
73 if (size
> (Py_ssize_t
)MAX_LONG_DIGITS
) {
74 PyErr_SetString(PyExc_OverflowError
,
75 "too many digits in integer");
78 /* coverity[ampersand_in_size] */
79 /* XXX(nnorwitz): PyObject_NEW_VAR / _PyObject_VAR_SIZE need to detect
81 return PyObject_NEW_VAR(PyLongObject
, &PyLong_Type
, size
);
85 _PyLong_Copy(PyLongObject
*src
)
94 result
= _PyLong_New(i
);
96 result
->ob_size
= src
->ob_size
;
98 result
->ob_digit
[i
] = src
->ob_digit
[i
];
100 return (PyObject
*)result
;
103 /* Create a new long int object from a C long int */
106 PyLong_FromLong(long ival
)
109 unsigned long abs_ival
;
110 unsigned long t
; /* unsigned so >> doesn't propagate sign bit */
115 /* if LONG_MIN == -LONG_MAX-1 (true on most platforms) then
116 ANSI C says that the result of -ival is undefined when ival
117 == LONG_MIN. Hence the following workaround. */
118 abs_ival
= (unsigned long)(-1-ival
) + 1;
122 abs_ival
= (unsigned long)ival
;
125 /* Count the number of Python digits.
126 We used to pick 5 ("big enough for anything"), but that's a
127 waste of time and space given that 5*15 = 75 bits are rarely
134 v
= _PyLong_New(ndigits
);
136 digit
*p
= v
->ob_digit
;
137 v
->ob_size
= negative
? -ndigits
: ndigits
;
140 *p
++ = (digit
)(t
& PyLong_MASK
);
144 return (PyObject
*)v
;
147 /* Create a new long int object from a C unsigned long int */
150 PyLong_FromUnsignedLong(unsigned long ival
)
156 /* Count the number of Python digits. */
157 t
= (unsigned long)ival
;
162 v
= _PyLong_New(ndigits
);
164 digit
*p
= v
->ob_digit
;
165 Py_SIZE(v
) = ndigits
;
167 *p
++ = (digit
)(ival
& PyLong_MASK
);
168 ival
>>= PyLong_SHIFT
;
171 return (PyObject
*)v
;
174 /* Create a new long int object from a C double */
177 PyLong_FromDouble(double dval
)
181 int i
, ndig
, expo
, neg
;
183 if (Py_IS_INFINITY(dval
)) {
184 PyErr_SetString(PyExc_OverflowError
,
185 "cannot convert float infinity to integer");
188 if (Py_IS_NAN(dval
)) {
189 PyErr_SetString(PyExc_ValueError
,
190 "cannot convert float NaN to integer");
197 frac
= frexp(dval
, &expo
); /* dval = frac*2**expo; 0.0 <= frac < 1.0 */
199 return PyLong_FromLong(0L);
200 ndig
= (expo
-1) / PyLong_SHIFT
+ 1; /* Number of 'digits' in result */
201 v
= _PyLong_New(ndig
);
204 frac
= ldexp(frac
, (expo
-1) % PyLong_SHIFT
+ 1);
205 for (i
= ndig
; --i
>= 0; ) {
206 digit bits
= (digit
)frac
;
207 v
->ob_digit
[i
] = bits
;
208 frac
= frac
- (double)bits
;
209 frac
= ldexp(frac
, PyLong_SHIFT
);
212 Py_SIZE(v
) = -(Py_SIZE(v
));
213 return (PyObject
*)v
;
/* Checking for overflow in PyLong_AsLong is a PITA since C doesn't define
 * anything about what happens when a signed integer operation overflows,
 * and some compilers think they're doing you a favor by being "clever"
 * then.  The bit pattern for the largest postive signed long is
 * (unsigned long)LONG_MAX, and for the smallest negative signed long
 * it is abs(LONG_MIN), which we could write -(unsigned long)LONG_MIN.
 * However, some other compilers warn about applying unary minus to an
 * unsigned operand.  Hence the weird "0-".
 */
#define PY_ABS_LONG_MIN         (0-(unsigned long)LONG_MIN)
#define PY_ABS_SSIZE_T_MIN      (0-(size_t)PY_SSIZE_T_MIN)
228 /* Get a C long int from a Python long or Python int object.
229 On overflow, returns -1 and sets *overflow to 1 or -1 depending
230 on the sign of the result. Otherwise *overflow is 0.
232 For other errors (e.g., type error), returns -1 and sets an error
237 PyLong_AsLongAndOverflow(PyObject
*vv
, int *overflow
)
239 /* This version by Tim Peters */
240 register PyLongObject
*v
;
241 unsigned long x
, prev
;
245 int do_decref
= 0; /* if nb_int was called */
249 PyErr_BadInternalCall();
254 return PyInt_AsLong(vv
);
256 if (!PyLong_Check(vv
)) {
258 nb
= vv
->ob_type
->tp_as_number
;
259 if (nb
== NULL
|| nb
->nb_int
== NULL
) {
260 PyErr_SetString(PyExc_TypeError
,
261 "an integer is required");
264 vv
= (*nb
->nb_int
) (vv
);
268 if(PyInt_Check(vv
)) {
269 res
= PyInt_AsLong(vv
);
272 if (!PyLong_Check(vv
)) {
274 PyErr_SetString(PyExc_TypeError
,
275 "nb_int should return int object");
281 v
= (PyLongObject
*)vv
;
286 res
= -(sdigit
)v
->ob_digit
[0];
292 res
= v
->ob_digit
[0];
303 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
304 if ((x
>> PyLong_SHIFT
) != prev
) {
309 /* Haven't lost any bits, but casting to long requires extra
310 * care (see comment above).
312 if (x
<= (unsigned long)LONG_MAX
) {
313 res
= (long)x
* sign
;
315 else if (sign
< 0 && x
== PY_ABS_LONG_MIN
) {
320 /* res is already set to -1 */
330 /* Get a C long int from a long int object.
331 Returns -1 and sets an error condition if overflow occurs. */
334 PyLong_AsLong(PyObject
*obj
)
337 long result
= PyLong_AsLongAndOverflow(obj
, &overflow
);
339 /* XXX: could be cute and give a different
340 message for overflow == -1 */
341 PyErr_SetString(PyExc_OverflowError
,
342 "Python int too large to convert to C long");
347 /* Get a Py_ssize_t from a long int object.
348 Returns -1 and sets an error condition if overflow occurs. */
351 PyLong_AsSsize_t(PyObject
*vv
) {
352 register PyLongObject
*v
;
357 if (vv
== NULL
|| !PyLong_Check(vv
)) {
358 PyErr_BadInternalCall();
361 v
= (PyLongObject
*)vv
;
371 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
372 if ((x
>> PyLong_SHIFT
) != prev
)
375 /* Haven't lost any bits, but casting to a signed type requires
376 * extra care (see comment above).
378 if (x
<= (size_t)PY_SSIZE_T_MAX
) {
379 return (Py_ssize_t
)x
* sign
;
381 else if (sign
< 0 && x
== PY_ABS_SSIZE_T_MIN
) {
382 return PY_SSIZE_T_MIN
;
387 PyErr_SetString(PyExc_OverflowError
,
388 "long int too large to convert to int");
392 /* Get a C unsigned long int from a long int object.
393 Returns -1 and sets an error condition if overflow occurs. */
396 PyLong_AsUnsignedLong(PyObject
*vv
)
398 register PyLongObject
*v
;
399 unsigned long x
, prev
;
402 if (vv
== NULL
|| !PyLong_Check(vv
)) {
403 if (vv
!= NULL
&& PyInt_Check(vv
)) {
404 long val
= PyInt_AsLong(vv
);
406 PyErr_SetString(PyExc_OverflowError
,
407 "can't convert negative value "
409 return (unsigned long) -1;
413 PyErr_BadInternalCall();
414 return (unsigned long) -1;
416 v
= (PyLongObject
*)vv
;
420 PyErr_SetString(PyExc_OverflowError
,
421 "can't convert negative value to unsigned long");
422 return (unsigned long) -1;
426 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
427 if ((x
>> PyLong_SHIFT
) != prev
) {
428 PyErr_SetString(PyExc_OverflowError
,
429 "long int too large to convert");
430 return (unsigned long) -1;
436 /* Get a C unsigned long int from a long int object, ignoring the high bits.
437 Returns -1 and sets an error condition if an error occurs. */
440 PyLong_AsUnsignedLongMask(PyObject
*vv
)
442 register PyLongObject
*v
;
447 if (vv
== NULL
|| !PyLong_Check(vv
)) {
448 if (vv
!= NULL
&& PyInt_Check(vv
))
449 return PyInt_AsUnsignedLongMask(vv
);
450 PyErr_BadInternalCall();
451 return (unsigned long) -1;
453 v
= (PyLongObject
*)vv
;
462 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
468 _PyLong_Sign(PyObject
*vv
)
470 PyLongObject
*v
= (PyLongObject
*)vv
;
473 assert(PyLong_Check(v
));
475 return Py_SIZE(v
) == 0 ? 0 : (Py_SIZE(v
) < 0 ? -1 : 1);
479 _PyLong_NumBits(PyObject
*vv
)
481 PyLongObject
*v
= (PyLongObject
*)vv
;
486 assert(PyLong_Check(v
));
487 ndigits
= ABS(Py_SIZE(v
));
488 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
490 digit msd
= v
->ob_digit
[ndigits
- 1];
492 result
= (ndigits
- 1) * PyLong_SHIFT
;
493 if (result
/ PyLong_SHIFT
!= (size_t)(ndigits
- 1))
505 PyErr_SetString(PyExc_OverflowError
, "long has too many bits "
506 "to express in a platform size_t");
511 _PyLong_FromByteArray(const unsigned char* bytes
, size_t n
,
512 int little_endian
, int is_signed
)
514 const unsigned char* pstartbyte
; /* LSB of bytes */
515 int incr
; /* direction to move pstartbyte */
516 const unsigned char* pendbyte
; /* MSB of bytes */
517 size_t numsignificantbytes
; /* number of bytes that matter */
518 Py_ssize_t ndigits
; /* number of Python long digits */
519 PyLongObject
* v
; /* result */
520 Py_ssize_t idigit
= 0; /* next free index in v->ob_digit */
523 return PyLong_FromLong(0L);
527 pendbyte
= bytes
+ n
- 1;
531 pstartbyte
= bytes
+ n
- 1;
537 is_signed
= *pendbyte
>= 0x80;
539 /* Compute numsignificantbytes. This consists of finding the most
540 significant byte. Leading 0 bytes are insignificant if the number
541 is positive, and leading 0xff bytes if negative. */
544 const unsigned char* p
= pendbyte
;
545 const int pincr
= -incr
; /* search MSB to LSB */
546 const unsigned char insignficant
= is_signed
? 0xff : 0x00;
548 for (i
= 0; i
< n
; ++i
, p
+= pincr
) {
549 if (*p
!= insignficant
)
552 numsignificantbytes
= n
- i
;
553 /* 2's-comp is a bit tricky here, e.g. 0xff00 == -0x0100, so
554 actually has 2 significant bytes. OTOH, 0xff0001 ==
555 -0x00ffff, so we wouldn't *need* to bump it there; but we
556 do for 0xffff = -0x0001. To be safe without bothering to
557 check every case, bump it regardless. */
558 if (is_signed
&& numsignificantbytes
< n
)
559 ++numsignificantbytes
;
562 /* How many Python long digits do we need? We have
563 8*numsignificantbytes bits, and each Python long digit has
564 PyLong_SHIFT bits, so it's the ceiling of the quotient. */
565 /* catch overflow before it happens */
566 if (numsignificantbytes
> (PY_SSIZE_T_MAX
- PyLong_SHIFT
) / 8) {
567 PyErr_SetString(PyExc_OverflowError
,
568 "byte array too long to convert to int");
571 ndigits
= (numsignificantbytes
* 8 + PyLong_SHIFT
- 1) / PyLong_SHIFT
;
572 v
= _PyLong_New(ndigits
);
576 /* Copy the bits over. The tricky parts are computing 2's-comp on
577 the fly for signed numbers, and dealing with the mismatch between
578 8-bit bytes and (probably) 15-bit Python digits.*/
581 twodigits carry
= 1; /* for 2's-comp calculation */
582 twodigits accum
= 0; /* sliding register */
583 unsigned int accumbits
= 0; /* number of bits in accum */
584 const unsigned char* p
= pstartbyte
;
586 for (i
= 0; i
< numsignificantbytes
; ++i
, p
+= incr
) {
587 twodigits thisbyte
= *p
;
588 /* Compute correction for 2's comp, if needed. */
590 thisbyte
= (0xff ^ thisbyte
) + carry
;
591 carry
= thisbyte
>> 8;
594 /* Because we're going LSB to MSB, thisbyte is
595 more significant than what's already in accum,
596 so needs to be prepended to accum. */
597 accum
|= (twodigits
)thisbyte
<< accumbits
;
599 if (accumbits
>= PyLong_SHIFT
) {
600 /* There's enough to fill a Python digit. */
601 assert(idigit
< ndigits
);
602 v
->ob_digit
[idigit
] = (digit
)(accum
& PyLong_MASK
);
604 accum
>>= PyLong_SHIFT
;
605 accumbits
-= PyLong_SHIFT
;
606 assert(accumbits
< PyLong_SHIFT
);
609 assert(accumbits
< PyLong_SHIFT
);
611 assert(idigit
< ndigits
);
612 v
->ob_digit
[idigit
] = (digit
)accum
;
617 Py_SIZE(v
) = is_signed
? -idigit
: idigit
;
618 return (PyObject
*)long_normalize(v
);
622 _PyLong_AsByteArray(PyLongObject
* v
,
623 unsigned char* bytes
, size_t n
,
624 int little_endian
, int is_signed
)
626 Py_ssize_t i
; /* index into v->ob_digit */
627 Py_ssize_t ndigits
; /* |v->ob_size| */
628 twodigits accum
; /* sliding register */
629 unsigned int accumbits
; /* # bits in accum */
630 int do_twos_comp
; /* store 2's-comp? is_signed and v < 0 */
631 digit carry
; /* for computing 2's-comp */
632 size_t j
; /* # bytes filled */
633 unsigned char* p
; /* pointer to next byte in bytes */
634 int pincr
; /* direction to move p */
636 assert(v
!= NULL
&& PyLong_Check(v
));
638 if (Py_SIZE(v
) < 0) {
639 ndigits
= -(Py_SIZE(v
));
641 PyErr_SetString(PyExc_OverflowError
,
642 "can't convert negative long to unsigned");
648 ndigits
= Py_SIZE(v
);
661 /* Copy over all the Python digits.
662 It's crucial that every Python digit except for the MSD contribute
663 exactly PyLong_SHIFT bits to the total, so first assert that the long is
665 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
669 carry
= do_twos_comp
? 1 : 0;
670 for (i
= 0; i
< ndigits
; ++i
) {
671 digit thisdigit
= v
->ob_digit
[i
];
673 thisdigit
= (thisdigit
^ PyLong_MASK
) + carry
;
674 carry
= thisdigit
>> PyLong_SHIFT
;
675 thisdigit
&= PyLong_MASK
;
677 /* Because we're going LSB to MSB, thisdigit is more
678 significant than what's already in accum, so needs to be
679 prepended to accum. */
680 accum
|= (twodigits
)thisdigit
<< accumbits
;
682 /* The most-significant digit may be (probably is) at least
684 if (i
== ndigits
- 1) {
685 /* Count # of sign bits -- they needn't be stored,
686 * although for signed conversion we need later to
687 * make sure at least one sign bit gets stored. */
688 digit s
= do_twos_comp
? thisdigit
^ PyLong_MASK
: thisdigit
;
695 accumbits
+= PyLong_SHIFT
;
697 /* Store as many bytes as possible. */
698 while (accumbits
>= 8) {
702 *p
= (unsigned char)(accum
& 0xff);
709 /* Store the straggler (if any). */
710 assert(accumbits
< 8);
711 assert(carry
== 0); /* else do_twos_comp and *every* digit was 0 */
717 /* Fill leading bits of the byte with sign bits
718 (appropriately pretending that the long had an
719 infinite supply of sign bits). */
720 accum
|= (~(twodigits
)0) << accumbits
;
722 *p
= (unsigned char)(accum
& 0xff);
725 else if (j
== n
&& n
> 0 && is_signed
) {
726 /* The main loop filled the byte array exactly, so the code
727 just above didn't get to ensure there's a sign bit, and the
728 loop below wouldn't add one either. Make sure a sign bit
730 unsigned char msb
= *(p
- pincr
);
731 int sign_bit_set
= msb
>= 0x80;
732 assert(accumbits
== 0);
733 if (sign_bit_set
== do_twos_comp
)
739 /* Fill remaining bytes with copies of the sign bit. */
741 unsigned char signbyte
= do_twos_comp
? 0xffU
: 0U;
742 for ( ; j
< n
; ++j
, p
+= pincr
)
749 PyErr_SetString(PyExc_OverflowError
, "long too big to convert");
754 /* Create a new long (or int) object from a C pointer */
757 PyLong_FromVoidPtr(void *p
)
759 #if SIZEOF_VOID_P <= SIZEOF_LONG
761 return PyLong_FromUnsignedLong((unsigned long)p
);
762 return PyInt_FromLong((long)p
);
765 #ifndef HAVE_LONG_LONG
766 # error "PyLong_FromVoidPtr: sizeof(void*) > sizeof(long), but no long long"
768 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
769 # error "PyLong_FromVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
771 /* optimize null pointers */
773 return PyInt_FromLong(0);
774 return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG
)p
);
776 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
779 /* Get a C pointer from a long object (or an int object in some cases) */
782 PyLong_AsVoidPtr(PyObject
*vv
)
784 /* This function will allow int or long objects. If vv is neither,
785 then the PyLong_AsLong*() functions will raise the exception:
786 PyExc_SystemError, "bad argument to internal function"
788 #if SIZEOF_VOID_P <= SIZEOF_LONG
792 x
= PyInt_AS_LONG(vv
);
793 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
794 x
= PyLong_AsLong(vv
);
796 x
= PyLong_AsUnsignedLong(vv
);
799 #ifndef HAVE_LONG_LONG
800 # error "PyLong_AsVoidPtr: sizeof(void*) > sizeof(long), but no long long"
802 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
803 # error "PyLong_AsVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
808 x
= PyInt_AS_LONG(vv
);
809 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
810 x
= PyLong_AsLongLong(vv
);
812 x
= PyLong_AsUnsignedLongLong(vv
);
814 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
816 if (x
== -1 && PyErr_Occurred())
821 #ifdef HAVE_LONG_LONG
823 /* Initial PY_LONG_LONG support by Chris Herborth (chrish@qnx.com), later
824 * rewritten to use the newer PyLong_{As,From}ByteArray API.
827 #define IS_LITTLE_ENDIAN (int)*(unsigned char*)&one
828 #define PY_ABS_LLONG_MIN (0-(unsigned PY_LONG_LONG)PY_LLONG_MIN)
830 /* Create a new long int object from a C PY_LONG_LONG int. */
833 PyLong_FromLongLong(PY_LONG_LONG ival
)
836 unsigned PY_LONG_LONG abs_ival
;
837 unsigned PY_LONG_LONG t
; /* unsigned so >> doesn't propagate sign bit */
842 /* avoid signed overflow on negation; see comments
843 in PyLong_FromLong above. */
844 abs_ival
= (unsigned PY_LONG_LONG
)(-1-ival
) + 1;
848 abs_ival
= (unsigned PY_LONG_LONG
)ival
;
851 /* Count the number of Python digits.
852 We used to pick 5 ("big enough for anything"), but that's a
853 waste of time and space given that 5*15 = 75 bits are rarely
860 v
= _PyLong_New(ndigits
);
862 digit
*p
= v
->ob_digit
;
863 Py_SIZE(v
) = negative
? -ndigits
: ndigits
;
866 *p
++ = (digit
)(t
& PyLong_MASK
);
870 return (PyObject
*)v
;
873 /* Create a new long int object from a C unsigned PY_LONG_LONG int. */
876 PyLong_FromUnsignedLongLong(unsigned PY_LONG_LONG ival
)
879 unsigned PY_LONG_LONG t
;
882 /* Count the number of Python digits. */
883 t
= (unsigned PY_LONG_LONG
)ival
;
888 v
= _PyLong_New(ndigits
);
890 digit
*p
= v
->ob_digit
;
891 Py_SIZE(v
) = ndigits
;
893 *p
++ = (digit
)(ival
& PyLong_MASK
);
894 ival
>>= PyLong_SHIFT
;
897 return (PyObject
*)v
;
900 /* Create a new long int object from a C Py_ssize_t. */
903 PyLong_FromSsize_t(Py_ssize_t ival
)
905 Py_ssize_t bytes
= ival
;
907 return _PyLong_FromByteArray((unsigned char *)&bytes
,
908 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 1);
911 /* Create a new long int object from a C size_t. */
914 PyLong_FromSize_t(size_t ival
)
918 return _PyLong_FromByteArray((unsigned char *)&bytes
,
919 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 0);
922 /* Get a C PY_LONG_LONG int from a long int object.
923 Return -1 and set an error if overflow occurs. */
926 PyLong_AsLongLong(PyObject
*vv
)
933 PyErr_BadInternalCall();
936 if (!PyLong_Check(vv
)) {
940 return (PY_LONG_LONG
)PyInt_AsLong(vv
);
941 if ((nb
= vv
->ob_type
->tp_as_number
) == NULL
||
942 nb
->nb_int
== NULL
) {
943 PyErr_SetString(PyExc_TypeError
, "an integer is required");
946 io
= (*nb
->nb_int
) (vv
);
949 if (PyInt_Check(io
)) {
950 bytes
= PyInt_AsLong(io
);
954 if (PyLong_Check(io
)) {
955 bytes
= PyLong_AsLongLong(io
);
960 PyErr_SetString(PyExc_TypeError
, "integer conversion failed");
964 res
= _PyLong_AsByteArray((PyLongObject
*)vv
, (unsigned char *)&bytes
,
965 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 1);
967 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
969 return (PY_LONG_LONG
)-1;
974 /* Get a C unsigned PY_LONG_LONG int from a long int object.
975 Return -1 and set an error if overflow occurs. */
977 unsigned PY_LONG_LONG
978 PyLong_AsUnsignedLongLong(PyObject
*vv
)
980 unsigned PY_LONG_LONG bytes
;
984 if (vv
== NULL
|| !PyLong_Check(vv
)) {
985 PyErr_BadInternalCall();
986 return (unsigned PY_LONG_LONG
)-1;
989 res
= _PyLong_AsByteArray((PyLongObject
*)vv
, (unsigned char *)&bytes
,
990 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 0);
992 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
994 return (unsigned PY_LONG_LONG
)res
;
999 /* Get a C unsigned long int from a long int object, ignoring the high bits.
1000 Returns -1 and sets an error condition if an error occurs. */
1002 unsigned PY_LONG_LONG
1003 PyLong_AsUnsignedLongLongMask(PyObject
*vv
)
1005 register PyLongObject
*v
;
1006 unsigned PY_LONG_LONG x
;
1010 if (vv
== NULL
|| !PyLong_Check(vv
)) {
1011 PyErr_BadInternalCall();
1012 return (unsigned long) -1;
1014 v
= (PyLongObject
*)vv
;
1023 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
1028 /* Get a C long long int from a Python long or Python int object.
1029 On overflow, returns -1 and sets *overflow to 1 or -1 depending
1030 on the sign of the result. Otherwise *overflow is 0.
1032 For other errors (e.g., type error), returns -1 and sets an error
1037 PyLong_AsLongLongAndOverflow(PyObject
*vv
, int *overflow
)
1039 /* This version by Tim Peters */
1040 register PyLongObject
*v
;
1041 unsigned PY_LONG_LONG x
, prev
;
1045 int do_decref
= 0; /* if nb_int was called */
1049 PyErr_BadInternalCall();
1053 if (PyInt_Check(vv
))
1054 return PyInt_AsLong(vv
);
1056 if (!PyLong_Check(vv
)) {
1057 PyNumberMethods
*nb
;
1058 nb
= vv
->ob_type
->tp_as_number
;
1059 if (nb
== NULL
|| nb
->nb_int
== NULL
) {
1060 PyErr_SetString(PyExc_TypeError
,
1061 "an integer is required");
1064 vv
= (*nb
->nb_int
) (vv
);
1068 if(PyInt_Check(vv
)) {
1069 res
= PyInt_AsLong(vv
);
1072 if (!PyLong_Check(vv
)) {
1074 PyErr_SetString(PyExc_TypeError
,
1075 "nb_int should return int object");
1081 v
= (PyLongObject
*)vv
;
1086 res
= -(sdigit
)v
->ob_digit
[0];
1092 res
= v
->ob_digit
[0];
1103 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
1104 if ((x
>> PyLong_SHIFT
) != prev
) {
1109 /* Haven't lost any bits, but casting to long requires extra
1110 * care (see comment above).
1112 if (x
<= (unsigned PY_LONG_LONG
)PY_LLONG_MAX
) {
1113 res
= (PY_LONG_LONG
)x
* sign
;
1115 else if (sign
< 0 && x
== PY_ABS_LLONG_MIN
) {
1120 /* res is already set to -1 */
1130 #undef IS_LITTLE_ENDIAN
1132 #endif /* HAVE_LONG_LONG */
1136 convert_binop(PyObject
*v
, PyObject
*w
, PyLongObject
**a
, PyLongObject
**b
) {
1137 if (PyLong_Check(v
)) {
1138 *a
= (PyLongObject
*) v
;
1141 else if (PyInt_Check(v
)) {
1142 *a
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(v
));
1147 if (PyLong_Check(w
)) {
1148 *b
= (PyLongObject
*) w
;
1151 else if (PyInt_Check(w
)) {
1152 *b
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(w
));
1161 #define CONVERT_BINOP(v, w, a, b) \
1163 if (!convert_binop(v, w, a, b)) { \
1164 Py_INCREF(Py_NotImplemented); \
1165 return Py_NotImplemented; \
1169 /* bits_in_digit(d) returns the unique integer k such that 2**(k-1) <= d <
1170 2**k if d is nonzero, else 0. */
1172 static const unsigned char BitLengthTable
[32] = {
1173 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
1174 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
1178 bits_in_digit(digit d
)
1185 d_bits
+= (int)BitLengthTable
[d
];
1189 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1190 * is modified in place, by adding y to it. Carries are propagated as far as
1191 * x[m-1], and the remaining carry (0 or 1) is returned.
1194 v_iadd(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1200 for (i
= 0; i
< n
; ++i
) {
1201 carry
+= x
[i
] + y
[i
];
1202 x
[i
] = carry
& PyLong_MASK
;
1203 carry
>>= PyLong_SHIFT
;
1204 assert((carry
& 1) == carry
);
1206 for (; carry
&& i
< m
; ++i
) {
1208 x
[i
] = carry
& PyLong_MASK
;
1209 carry
>>= PyLong_SHIFT
;
1210 assert((carry
& 1) == carry
);
1215 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1216 * is modified in place, by subtracting y from it. Borrows are propagated as
1217 * far as x[m-1], and the remaining borrow (0 or 1) is returned.
1220 v_isub(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1226 for (i
= 0; i
< n
; ++i
) {
1227 borrow
= x
[i
] - y
[i
] - borrow
;
1228 x
[i
] = borrow
& PyLong_MASK
;
1229 borrow
>>= PyLong_SHIFT
;
1230 borrow
&= 1; /* keep only 1 sign bit */
1232 for (; borrow
&& i
< m
; ++i
) {
1233 borrow
= x
[i
] - borrow
;
1234 x
[i
] = borrow
& PyLong_MASK
;
1235 borrow
>>= PyLong_SHIFT
;
1241 /* Shift digit vector a[0:m] d bits left, with 0 <= d < PyLong_SHIFT. Put
1242 * result in z[0:m], and return the d bits shifted out of the top.
1245 v_lshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1250 assert(0 <= d
&& d
< PyLong_SHIFT
);
1251 for (i
=0; i
< m
; i
++) {
1252 twodigits acc
= (twodigits
)a
[i
] << d
| carry
;
1253 z
[i
] = (digit
)acc
& PyLong_MASK
;
1254 carry
= (digit
)(acc
>> PyLong_SHIFT
);
1259 /* Shift digit vector a[0:m] d bits right, with 0 <= d < PyLong_SHIFT. Put
1260 * result in z[0:m], and return the d bits shifted out of the bottom.
1263 v_rshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1267 digit mask
= ((digit
)1 << d
) - 1U;
1269 assert(0 <= d
&& d
< PyLong_SHIFT
);
1270 for (i
=m
; i
-- > 0;) {
1271 twodigits acc
= (twodigits
)carry
<< PyLong_SHIFT
| a
[i
];
1272 carry
= (digit
)acc
& mask
;
1273 z
[i
] = (digit
)(acc
>> d
);
1278 /* Divide long pin, w/ size digits, by non-zero digit n, storing quotient
1279 in pout, and returning the remainder. pin and pout point at the LSD.
1280 It's OK for pin == pout on entry, which saves oodles of mallocs/frees in
1281 _PyLong_Format, but that should be done with great care since longs are
1285 inplace_divrem1(digit
*pout
, digit
*pin
, Py_ssize_t size
, digit n
)
1289 assert(n
> 0 && n
<= PyLong_MASK
);
1292 while (--size
>= 0) {
1294 rem
= (rem
<< PyLong_SHIFT
) | *--pin
;
1295 *--pout
= hi
= (digit
)(rem
/ n
);
1296 rem
-= (twodigits
)hi
* n
;
1301 /* Divide a long integer by a digit, returning both the quotient
1302 (as function result) and the remainder (through *prem).
1303 The sign of a is ignored; n should not be zero. */
1305 static PyLongObject
*
1306 divrem1(PyLongObject
*a
, digit n
, digit
*prem
)
1308 const Py_ssize_t size
= ABS(Py_SIZE(a
));
1311 assert(n
> 0 && n
<= PyLong_MASK
);
1312 z
= _PyLong_New(size
);
1315 *prem
= inplace_divrem1(z
->ob_digit
, a
->ob_digit
, size
, n
);
1316 return long_normalize(z
);
1319 /* Convert a long integer to a base 10 string. Returns a new non-shared
1320 string. (Return value is non-shared so that callers can modify the
1321 returned value if necessary.) */
1324 long_to_decimal_string(PyObject
*aa
, int addL
)
1326 PyLongObject
*scratch
, *a
;
1328 Py_ssize_t size
, strlen
, size_a
, i
, j
;
1329 digit
*pout
, *pin
, rem
, tenpow
;
1333 a
= (PyLongObject
*)aa
;
1334 if (a
== NULL
|| !PyLong_Check(a
)) {
1335 PyErr_BadInternalCall();
1338 size_a
= ABS(Py_SIZE(a
));
1339 negative
= Py_SIZE(a
) < 0;
1341 /* quick and dirty upper bound for the number of digits
1342 required to express a in base _PyLong_DECIMAL_BASE:
1344 #digits = 1 + floor(log2(a) / log2(_PyLong_DECIMAL_BASE))
1346 But log2(a) < size_a * PyLong_SHIFT, and
1347 log2(_PyLong_DECIMAL_BASE) = log2(10) * _PyLong_DECIMAL_SHIFT
1348 > 3 * _PyLong_DECIMAL_SHIFT
1350 if (size_a
> PY_SSIZE_T_MAX
/ PyLong_SHIFT
) {
1351 PyErr_SetString(PyExc_OverflowError
,
1352 "long is too large to format");
1355 /* the expression size_a * PyLong_SHIFT is now safe from overflow */
1356 size
= 1 + size_a
* PyLong_SHIFT
/ (3 * _PyLong_DECIMAL_SHIFT
);
1357 scratch
= _PyLong_New(size
);
1358 if (scratch
== NULL
)
1361 /* convert array of base _PyLong_BASE digits in pin to an array of
1362 base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP,
1363 Volume 2 (3rd edn), section 4.4, Method 1b). */
1365 pout
= scratch
->ob_digit
;
1367 for (i
= size_a
; --i
>= 0; ) {
1369 for (j
= 0; j
< size
; j
++) {
1370 twodigits z
= (twodigits
)pout
[j
] << PyLong_SHIFT
| hi
;
1371 hi
= (digit
)(z
/ _PyLong_DECIMAL_BASE
);
1372 pout
[j
] = (digit
)(z
- (twodigits
)hi
*
1373 _PyLong_DECIMAL_BASE
);
1376 pout
[size
++] = hi
% _PyLong_DECIMAL_BASE
;
1377 hi
/= _PyLong_DECIMAL_BASE
;
1379 /* check for keyboard interrupt */
1385 /* pout should have at least one digit, so that the case when a = 0
1390 /* calculate exact length of output string, and allocate */
1391 strlen
= (addL
!= 0) + negative
+
1392 1 + (size
- 1) * _PyLong_DECIMAL_SHIFT
;
1395 while (rem
>= tenpow
) {
1399 str
= PyString_FromStringAndSize(NULL
, strlen
);
1405 /* fill the string right-to-left */
1406 p
= PyString_AS_STRING(str
) + strlen
;
1410 /* pout[0] through pout[size-2] contribute exactly
1411 _PyLong_DECIMAL_SHIFT digits each */
1412 for (i
=0; i
< size
- 1; i
++) {
1414 for (j
= 0; j
< _PyLong_DECIMAL_SHIFT
; j
++) {
1415 *--p
= '0' + rem
% 10;
1419 /* pout[size-1]: always produce at least one decimal digit */
1422 *--p
= '0' + rem
% 10;
1430 /* check we've counted correctly */
1431 assert(p
== PyString_AS_STRING(str
));
1433 return (PyObject
*)str
;
1436 /* Convert the long to a string object with given base,
1437 appending a base prefix of 0[box] if base is 2, 8 or 16.
1438 Add a trailing "L" if addL is non-zero.
1439 If newstyle is zero, then use the pre-2.6 behavior of octal having
1440 a leading "0", instead of the prefix "0o" */
1441 PyAPI_FUNC(PyObject
*)
1442 _PyLong_Format(PyObject
*aa
, int base
, int addL
, int newstyle
)
1444 register PyLongObject
*a
= (PyLongObject
*)aa
;
1445 PyStringObject
*str
;
1453 return long_to_decimal_string((PyObject
*)a
, addL
);
1455 if (a
== NULL
|| !PyLong_Check(a
)) {
1456 PyErr_BadInternalCall();
1459 assert(base
>= 2 && base
<= 36);
1460 size_a
= ABS(Py_SIZE(a
));
1462 /* Compute a rough upper bound for the length of the string */
1469 i
= 5 + (addL
? 1 : 0);
1470 /* ensure we don't get signed overflow in sz calculation */
1471 if (size_a
> (PY_SSIZE_T_MAX
- i
) / PyLong_SHIFT
) {
1472 PyErr_SetString(PyExc_OverflowError
,
1473 "long is too large to format");
1476 sz
= i
+ 1 + (size_a
* PyLong_SHIFT
- 1) / bits
;
1478 str
= (PyStringObject
*) PyString_FromStringAndSize((char *)0, sz
);
1481 p
= PyString_AS_STRING(str
) + sz
;
1488 if (a
->ob_size
== 0) {
1491 else if ((base
& (base
- 1)) == 0) {
1492 /* JRH: special case for power-of-2 bases */
1493 twodigits accum
= 0;
1494 int accumbits
= 0; /* # of bits in accum */
1495 int basebits
= 1; /* # of bits in base-1 */
1497 while ((i
>>= 1) > 1)
1500 for (i
= 0; i
< size_a
; ++i
) {
1501 accum
|= (twodigits
)a
->ob_digit
[i
] << accumbits
;
1502 accumbits
+= PyLong_SHIFT
;
1503 assert(accumbits
>= basebits
);
1505 char cdigit
= (char)(accum
& (base
- 1));
1506 cdigit
+= (cdigit
< 10) ? '0' : 'a'-10;
1507 assert(p
> PyString_AS_STRING(str
));
1509 accumbits
-= basebits
;
1511 } while (i
< size_a
-1 ? accumbits
>= basebits
: accum
> 0);
1515 /* Not 0, and base not a power of 2. Divide repeatedly by
1516 base, but for speed use the highest power of base that
1518 Py_ssize_t size
= size_a
;
1519 digit
*pin
= a
->ob_digit
;
1520 PyLongObject
*scratch
;
1521 /* powbasw <- largest power of base that fits in a digit. */
1522 digit powbase
= base
; /* powbase == base ** power */
1525 twodigits newpow
= powbase
* (twodigits
)base
;
1526 if (newpow
>> PyLong_SHIFT
)
1527 /* doesn't fit in a digit */
1529 powbase
= (digit
)newpow
;
1533 /* Get a scratch area for repeated division. */
1534 scratch
= _PyLong_New(size
);
1535 if (scratch
== NULL
) {
1540 /* Repeatedly divide by powbase. */
1542 int ntostore
= power
;
1543 digit rem
= inplace_divrem1(scratch
->ob_digit
,
1544 pin
, size
, powbase
);
1545 pin
= scratch
->ob_digit
; /* no need to use a again */
1546 if (pin
[size
- 1] == 0)
1554 /* Break rem into digits. */
1555 assert(ntostore
> 0);
1557 digit nextrem
= (digit
)(rem
/ base
);
1558 char c
= (char)(rem
- nextrem
* base
);
1559 assert(p
> PyString_AS_STRING(str
));
1560 c
+= (c
< 10) ? '0' : 'a'-10;
1564 /* Termination is a bit delicate: must not
1565 store leading zeroes, so must get out if
1566 remaining quotient and rem are both 0. */
1567 } while (ntostore
&& (size
|| rem
));
1568 } while (size
!= 0);
1576 else if (base
== 8) {
1585 else if (base
== 16) {
1589 else if (base
!= 10) {
1591 *--p
= '0' + base
%10;
1593 *--p
= '0' + base
/10;
1597 if (p
!= PyString_AS_STRING(str
)) {
1598 char *q
= PyString_AS_STRING(str
);
1601 } while ((*q
++ = *p
++) != '\0');
1603 _PyString_Resize((PyObject
**)&str
,
1604 (Py_ssize_t
) (q
- PyString_AS_STRING(str
)));
1606 return (PyObject
*)str
;
/* Table of digit values for 8-bit string -> integer conversion.
 * '0' maps to 0, ..., '9' maps to 9.
 * 'a' and 'A' map to 10, ..., 'z' and 'Z' map to 35.
 * All other indices map to 37.
 * Note that when converting a base B string, a char c is a legitimate
 * base B digit iff _PyLong_DigitValue[Py_CHARMASK(c)] < B.
 */
int _PyLong_DigitValue[256] = {
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  37, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
    25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
    37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
};
1635 /* *str points to the first digit in a string of base `base` digits. base
1636 * is a power of 2 (2, 4, 8, 16, or 32). *str is set to point to the first
1637 * non-digit (which may be *str!). A normalized long is returned.
1638 * The point to this routine is that it takes time linear in the number of
1639 * string characters.
1641 static PyLongObject
*
1642 long_from_binary_base(char **str
, int base
)
1653 assert(base
>= 2 && base
<= 32 && (base
& (base
- 1)) == 0);
1655 for (bits_per_char
= -1; n
; ++bits_per_char
)
1657 /* n <- total # of bits needed, while setting p to end-of-string */
1658 while (_PyLong_DigitValue
[Py_CHARMASK(*p
)] < base
)
1661 /* n <- # of Python digits needed, = ceiling(n/PyLong_SHIFT). */
1662 n
= (p
- start
) * bits_per_char
+ PyLong_SHIFT
- 1;
1663 if (n
/ bits_per_char
< p
- start
) {
1664 PyErr_SetString(PyExc_ValueError
,
1665 "long string too large to convert");
1668 n
= n
/ PyLong_SHIFT
;
1672 /* Read string from right, and fill in long from left; i.e.,
1673 * from least to most significant in both.
1677 pdigit
= z
->ob_digit
;
1678 while (--p
>= start
) {
1679 int k
= _PyLong_DigitValue
[Py_CHARMASK(*p
)];
1680 assert(k
>= 0 && k
< base
);
1681 accum
|= (twodigits
)k
<< bits_in_accum
;
1682 bits_in_accum
+= bits_per_char
;
1683 if (bits_in_accum
>= PyLong_SHIFT
) {
1684 *pdigit
++ = (digit
)(accum
& PyLong_MASK
);
1685 assert(pdigit
- z
->ob_digit
<= n
);
1686 accum
>>= PyLong_SHIFT
;
1687 bits_in_accum
-= PyLong_SHIFT
;
1688 assert(bits_in_accum
< PyLong_SHIFT
);
1691 if (bits_in_accum
) {
1692 assert(bits_in_accum
<= PyLong_SHIFT
);
1693 *pdigit
++ = (digit
)accum
;
1694 assert(pdigit
- z
->ob_digit
<= n
);
1696 while (pdigit
- z
->ob_digit
< n
)
1698 return long_normalize(z
);
1702 PyLong_FromString(char *str
, char **pend
, int base
)
1705 char *start
, *orig_str
= str
;
1707 PyObject
*strobj
, *strrepr
;
1710 if ((base
!= 0 && base
< 2) || base
> 36) {
1711 PyErr_SetString(PyExc_ValueError
,
1712 "long() arg 2 must be >= 2 and <= 36");
1715 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1719 else if (*str
== '-') {
1723 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1726 /* No base given. Deduce the base from the contents
1730 else if (str
[1] == 'x' || str
[1] == 'X')
1732 else if (str
[1] == 'o' || str
[1] == 'O')
1734 else if (str
[1] == 'b' || str
[1] == 'B')
1737 /* "old" (C-style) octal literal, still valid in
1738 2.x, although illegal in 3.x */
1741 /* Whether or not we were deducing the base, skip leading chars
1743 if (str
[0] == '0' &&
1744 ((base
== 16 && (str
[1] == 'x' || str
[1] == 'X')) ||
1745 (base
== 8 && (str
[1] == 'o' || str
[1] == 'O')) ||
1746 (base
== 2 && (str
[1] == 'b' || str
[1] == 'B'))))
1750 if ((base
& (base
- 1)) == 0)
1751 z
= long_from_binary_base(&str
, base
);
1754 Binary bases can be converted in time linear in the number of digits, because
1755 Python's representation base is binary. Other bases (including decimal!) use
1756 the simple quadratic-time algorithm below, complicated by some speed tricks.
1758 First some math: the largest integer that can be expressed in N base-B digits
1759 is B**N-1. Consequently, if we have an N-digit input in base B, the worst-
1760 case number of Python digits needed to hold it is the smallest integer n s.t.
1762 PyLong_BASE**n-1 >= B**N-1 [or, adding 1 to both sides]
1763 PyLong_BASE**n >= B**N [taking logs to base PyLong_BASE]
1764 n >= log(B**N)/log(PyLong_BASE) = N * log(B)/log(PyLong_BASE)
1766 The static array log_base_PyLong_BASE[base] == log(base)/log(PyLong_BASE) so
1767 we can compute this quickly. A Python long with that much space is reserved
1768 near the start, and the result is computed into it.
1770 The input string is actually treated as being in base base**i (i.e., i digits
1771 are processed at a time), where two more static arrays hold:
1773 convwidth_base[base] = the largest integer i such that
1774 base**i <= PyLong_BASE
1775 convmultmax_base[base] = base ** convwidth_base[base]
1777 The first of these is the largest i such that i consecutive input digits
1778 must fit in a single Python digit. The second is effectively the input
1779 base we're really using.
1781 Viewing the input as a sequence <c0, c1, ..., c_n-1> of digits in base
1782 convmultmax_base[base], the result is "simply"
1784 (((c0*B + c1)*B + c2)*B + c3)*B + ... ))) + c_n-1
1786 where B = convmultmax_base[base].
1788 Error analysis: as above, the number of Python digits `n` needed is worst-
1791 n >= N * log(B)/log(PyLong_BASE)
1793 where `N` is the number of input digits in base `B`. This is computed via
1795 size_z = (Py_ssize_t)((scan - str) * log_base_PyLong_BASE[base]) + 1;
1797 below. Two numeric concerns are how much space this can waste, and whether
1798 the computed result can be too small. To be concrete, assume PyLong_BASE =
1799 2**15, which is the default (and it's unlikely anyone changes that).
1801 Waste isn't a problem: provided the first input digit isn't 0, the difference
1802 between the worst-case input with N digits and the smallest input with N
1803 digits is about a factor of B, but B is small compared to PyLong_BASE so at
1804 most one allocated Python digit can remain unused on that count. If
1805 N*log(B)/log(PyLong_BASE) is mathematically an exact integer, then truncating
1806 that and adding 1 returns a result 1 larger than necessary. However, that
1807 can't happen: whenever B is a power of 2, long_from_binary_base() is called
1808 instead, and it's impossible for B**i to be an integer power of 2**15 when B
1809 is not a power of 2 (i.e., it's impossible for N*log(B)/log(PyLong_BASE) to be
1810 an exact integer when B is not a power of 2, since B**i has a prime factor
1811 other than 2 in that case, but (2**15)**j's only prime factor is 2).
1813 The computed result can be too small if the true value of
1814 N*log(B)/log(PyLong_BASE) is a little bit larger than an exact integer, but
1815 due to roundoff errors (in computing log(B), log(PyLong_BASE), their quotient,
1816 and/or multiplying that by N) yields a numeric result a little less than that
1817 integer. Unfortunately, "how close can a transcendental function get to an
1818 integer over some range?" questions are generally theoretically intractable.
1819 Computer analysis via continued fractions is practical: expand
1820 log(B)/log(PyLong_BASE) via continued fractions, giving a sequence i/j of "the
1821 best" rational approximations. Then j*log(B)/log(PyLong_BASE) is
1822 approximately equal to (the integer) i. This shows that we can get very close
1823 to being in trouble, but very rarely. For example, 76573 is a denominator in
1824 one of the continued-fraction approximations to log(10)/log(2**15), and
1827 >>> log(10)/log(2**15)*76573
1830 is very close to an integer. If we were working with IEEE single-precision,
1831 rounding errors could kill us. Finding worst cases in IEEE double-precision
1832 requires better-than-double-precision log() functions, and Tim didn't bother.
1833 Instead the code checks to see whether the allocated space is enough as each
1834 new Python digit is added, and copies the whole thing to a larger long if not.
1835 This should happen extremely rarely, and in fact I don't have a test case
1836 that triggers it(!). Instead the code was tested by artificially allocating
1837 just 1 digit at the start, so that the copying code was exercised for every
1838 digit beyond the first.
1840 register twodigits c
; /* current input character */
1844 twodigits convmultmax
, convmult
;
1848 static double log_base_PyLong_BASE
[37] = {0.0e0
,};
1849 static int convwidth_base
[37] = {0,};
1850 static twodigits convmultmax_base
[37] = {0,};
1852 if (log_base_PyLong_BASE
[base
] == 0.0) {
1853 twodigits convmax
= base
;
1856 log_base_PyLong_BASE
[base
] = (log((double)base
) /
1857 log((double)PyLong_BASE
));
1859 twodigits next
= convmax
* base
;
1860 if (next
> PyLong_BASE
)
1865 convmultmax_base
[base
] = convmax
;
1867 convwidth_base
[base
] = i
;
1870 /* Find length of the string of numeric characters. */
1872 while (_PyLong_DigitValue
[Py_CHARMASK(*scan
)] < base
)
1875 /* Create a long object that can contain the largest possible
1876 * integer with this base and length. Note that there's no
1877 * need to initialize z->ob_digit -- no slot is read up before
1878 * being stored into.
1880 size_z
= (Py_ssize_t
)((scan
- str
) * log_base_PyLong_BASE
[base
]) + 1;
1881 /* Uncomment next line to test exceedingly rare copy code */
1884 z
= _PyLong_New(size_z
);
1889 /* `convwidth` consecutive input digits are treated as a single
1890 * digit in base `convmultmax`.
1892 convwidth
= convwidth_base
[base
];
1893 convmultmax
= convmultmax_base
[base
];
1896 while (str
< scan
) {
1897 /* grab up to convwidth digits from the input string */
1898 c
= (digit
)_PyLong_DigitValue
[Py_CHARMASK(*str
++)];
1899 for (i
= 1; i
< convwidth
&& str
!= scan
; ++i
, ++str
) {
1900 c
= (twodigits
)(c
* base
+
1901 _PyLong_DigitValue
[Py_CHARMASK(*str
)]);
1902 assert(c
< PyLong_BASE
);
1905 convmult
= convmultmax
;
1906 /* Calculate the shift only if we couldn't get
1909 if (i
!= convwidth
) {
1915 /* Multiply z by convmult, and add c. */
1917 pzstop
= pz
+ Py_SIZE(z
);
1918 for (; pz
< pzstop
; ++pz
) {
1919 c
+= (twodigits
)*pz
* convmult
;
1920 *pz
= (digit
)(c
& PyLong_MASK
);
1923 /* carry off the current end? */
1925 assert(c
< PyLong_BASE
);
1926 if (Py_SIZE(z
) < size_z
) {
1932 /* Extremely rare. Get more space. */
1933 assert(Py_SIZE(z
) == size_z
);
1934 tmp
= _PyLong_New(size_z
+ 1);
1939 memcpy(tmp
->ob_digit
,
1941 sizeof(digit
) * size_z
);
1944 z
->ob_digit
[size_z
] = (digit
)c
;
1955 Py_SIZE(z
) = -(Py_SIZE(z
));
1956 if (*str
== 'L' || *str
== 'l')
1958 while (*str
&& isspace(Py_CHARMASK(*str
)))
1964 return (PyObject
*) z
;
1968 slen
= strlen(orig_str
) < 200 ? strlen(orig_str
) : 200;
1969 strobj
= PyString_FromStringAndSize(orig_str
, slen
);
1972 strrepr
= PyObject_Repr(strobj
);
1974 if (strrepr
== NULL
)
1976 PyErr_Format(PyExc_ValueError
,
1977 "invalid literal for long() with base %d: %s",
1978 base
, PyString_AS_STRING(strrepr
));
#ifdef Py_USING_UNICODE
/* Convert a Unicode digit string of the given length to a long in `base`.
 * Encodes to a temporary byte buffer via PyUnicode_EncodeDecimal, then
 * delegates to PyLong_FromString.  Returns a new reference or NULL. */
PyObject *
PyLong_FromUnicode(Py_UNICODE *u, Py_ssize_t length, int base)
{
    PyObject *result;
    char *buffer = (char *)PyMem_MALLOC(length+1);

    if (buffer == NULL)
        return NULL;

    if (PyUnicode_EncodeDecimal(u, length, buffer, NULL)) {
        PyMem_FREE(buffer);
        return NULL;
    }
    result = PyLong_FromString(buffer, NULL, base);
    PyMem_FREE(buffer);
    return result;
}
#endif
2004 static PyLongObject
*x_divrem
2005 (PyLongObject
*, PyLongObject
*, PyLongObject
**);
2006 static PyObject
*long_long(PyObject
*v
);
2008 /* Long division with remainder, top-level routine */
2011 long_divrem(PyLongObject
*a
, PyLongObject
*b
,
2012 PyLongObject
**pdiv
, PyLongObject
**prem
)
2014 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2018 PyErr_SetString(PyExc_ZeroDivisionError
,
2019 "long division or modulo by zero");
2022 if (size_a
< size_b
||
2023 (size_a
== size_b
&&
2024 a
->ob_digit
[size_a
-1] < b
->ob_digit
[size_b
-1])) {
2026 *pdiv
= _PyLong_New(0);
2030 *prem
= (PyLongObject
*) a
;
2035 z
= divrem1(a
, b
->ob_digit
[0], &rem
);
2038 *prem
= (PyLongObject
*) PyLong_FromLong((long)rem
);
2039 if (*prem
== NULL
) {
2045 z
= x_divrem(a
, b
, prem
);
2050 The quotient z has the sign of a*b;
2051 the remainder r has the sign of a,
2053 if ((a
->ob_size
< 0) != (b
->ob_size
< 0))
2054 z
->ob_size
= -(z
->ob_size
);
2055 if (a
->ob_size
< 0 && (*prem
)->ob_size
!= 0)
2056 (*prem
)->ob_size
= -((*prem
)->ob_size
);
2061 /* Unsigned long division with remainder -- the algorithm. The arguments v1
2062 and w1 should satisfy 2 <= ABS(Py_SIZE(w1)) <= ABS(Py_SIZE(v1)). */
2064 static PyLongObject
*
2065 x_divrem(PyLongObject
*v1
, PyLongObject
*w1
, PyLongObject
**prem
)
2067 PyLongObject
*v
, *w
, *a
;
2068 Py_ssize_t i
, k
, size_v
, size_w
;
2070 digit wm1
, wm2
, carry
, q
, r
, vtop
, *v0
, *vk
, *w0
, *ak
;
2075 /* We follow Knuth [The Art of Computer Programming, Vol. 2 (3rd
2076 edn.), section 4.3.1, Algorithm D], except that we don't explicitly
2077 handle the special case when the initial estimate q for a quotient
2078 digit is >= PyLong_BASE: the max value for q is PyLong_BASE+1, and
2079 that won't overflow a digit. */
2081 /* allocate space; w will also be used to hold the final remainder */
2082 size_v
= ABS(Py_SIZE(v1
));
2083 size_w
= ABS(Py_SIZE(w1
));
2084 assert(size_v
>= size_w
&& size_w
>= 2); /* Assert checks by div() */
2085 v
= _PyLong_New(size_v
+1);
2090 w
= _PyLong_New(size_w
);
2097 /* normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
2098 shift v1 left by the same amount. Results go into w and v. */
2099 d
= PyLong_SHIFT
- bits_in_digit(w1
->ob_digit
[size_w
-1]);
2100 carry
= v_lshift(w
->ob_digit
, w1
->ob_digit
, size_w
, d
);
2102 carry
= v_lshift(v
->ob_digit
, v1
->ob_digit
, size_v
, d
);
2103 if (carry
!= 0 || v
->ob_digit
[size_v
-1] >= w
->ob_digit
[size_w
-1]) {
2104 v
->ob_digit
[size_v
] = carry
;
2108 /* Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
2109 at most (and usually exactly) k = size_v - size_w digits. */
2110 k
= size_v
- size_w
;
2123 for (vk
= v0
+k
, ak
= a
->ob_digit
+ k
; vk
-- > v0
;) {
2124 /* inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
2125 single-digit quotient q, remainder in vk[0:size_w]. */
2135 /* estimate quotient digit q; may overestimate by 1 (rare) */
2137 assert(vtop
<= wm1
);
2138 vv
= ((twodigits
)vtop
<< PyLong_SHIFT
) | vk
[size_w
-1];
2139 q
= (digit
)(vv
/ wm1
);
2140 r
= (digit
)(vv
- (twodigits
)wm1
* q
); /* r = vv % wm1 */
2141 while ((twodigits
)wm2
* q
> (((twodigits
)r
<< PyLong_SHIFT
)
2145 if (r
>= PyLong_BASE
)
2148 assert(q
<= PyLong_BASE
);
2150 /* subtract q*w0[0:size_w] from vk[0:size_w+1] */
2152 for (i
= 0; i
< size_w
; ++i
) {
2153 /* invariants: -PyLong_BASE <= -q <= zhi <= 0;
2154 -PyLong_BASE * q <= z < PyLong_BASE */
2155 z
= (sdigit
)vk
[i
] + zhi
-
2156 (stwodigits
)q
* (stwodigits
)w0
[i
];
2157 vk
[i
] = (digit
)z
& PyLong_MASK
;
2158 zhi
= (sdigit
)Py_ARITHMETIC_RIGHT_SHIFT(stwodigits
,
2162 /* add w back if q was too large (this branch taken rarely) */
2163 assert((sdigit
)vtop
+ zhi
== -1 || (sdigit
)vtop
+ zhi
== 0);
2164 if ((sdigit
)vtop
+ zhi
< 0) {
2166 for (i
= 0; i
< size_w
; ++i
) {
2167 carry
+= vk
[i
] + w0
[i
];
2168 vk
[i
] = carry
& PyLong_MASK
;
2169 carry
>>= PyLong_SHIFT
;
2174 /* store quotient digit */
2175 assert(q
< PyLong_BASE
);
2179 /* unshift remainder; we reuse w to store the result */
2180 carry
= v_rshift(w0
, v0
, size_w
, d
);
2184 *prem
= long_normalize(w
);
2185 return long_normalize(a
);
2188 /* For a nonzero PyLong a, express a in the form x * 2**e, with 0.5 <=
2189 abs(x) < 1.0 and e >= 0; return x and put e in *e. Here x is
2190 rounded to DBL_MANT_DIG significant bits using round-half-to-even.
2191 If a == 0, return 0.0 and set *e = 0. If the resulting exponent
2192 e is larger than PY_SSIZE_T_MAX, raise OverflowError and return
2195 /* attempt to define 2.0**DBL_MANT_DIG as a compile-time constant */
2196 #if DBL_MANT_DIG == 53
2197 #define EXP2_DBL_MANT_DIG 9007199254740992.0
2199 #define EXP2_DBL_MANT_DIG (ldexp(1.0, DBL_MANT_DIG))
2203 _PyLong_Frexp(PyLongObject
*a
, Py_ssize_t
*e
)
2205 Py_ssize_t a_size
, a_bits
, shift_digits
, shift_bits
, x_size
;
2206 /* See below for why x_digits is always large enough. */
2207 digit rem
, x_digits
[2 + (DBL_MANT_DIG
+ 1) / PyLong_SHIFT
];
2209 /* Correction term for round-half-to-even rounding. For a digit x,
2210 "x + half_even_correction[x & 7]" gives x rounded to the nearest
2211 multiple of 4, rounding ties to a multiple of 8. */
2212 static const int half_even_correction
[8] = {0, -1, -2, 1, 0, -1, 2, 1};
2214 a_size
= ABS(Py_SIZE(a
));
2216 /* Special case for 0: significand 0.0, exponent 0. */
2220 a_bits
= bits_in_digit(a
->ob_digit
[a_size
-1]);
2221 /* The following is an overflow-free version of the check
2222 "if ((a_size - 1) * PyLong_SHIFT + a_bits > PY_SSIZE_T_MAX) ..." */
2223 if (a_size
>= (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 &&
2224 (a_size
> (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 ||
2225 a_bits
> (PY_SSIZE_T_MAX
- 1) % PyLong_SHIFT
+ 1))
2227 a_bits
= (a_size
- 1) * PyLong_SHIFT
+ a_bits
;
2229 /* Shift the first DBL_MANT_DIG + 2 bits of a into x_digits[0:x_size]
2230 (shifting left if a_bits <= DBL_MANT_DIG + 2).
2232 Number of digits needed for result: write // for floor division.
2233 Then if shifting left, we end up using
2235 1 + a_size + (DBL_MANT_DIG + 2 - a_bits) // PyLong_SHIFT
2237 digits. If shifting right, we use
2239 a_size - (a_bits - DBL_MANT_DIG - 2) // PyLong_SHIFT
2241 digits. Using a_size = 1 + (a_bits - 1) // PyLong_SHIFT along with
2244 m // PyLong_SHIFT + n // PyLong_SHIFT <= (m + n) // PyLong_SHIFT
2245 m // PyLong_SHIFT - n // PyLong_SHIFT <=
2246 1 + (m - n - 1) // PyLong_SHIFT,
2248 valid for any integers m and n, we find that x_size satisfies
2250 x_size <= 2 + (DBL_MANT_DIG + 1) // PyLong_SHIFT
2254 if (a_bits
<= DBL_MANT_DIG
+ 2) {
2255 shift_digits
= (DBL_MANT_DIG
+ 2 - a_bits
) / PyLong_SHIFT
;
2256 shift_bits
= (DBL_MANT_DIG
+ 2 - a_bits
) % PyLong_SHIFT
;
2258 while (x_size
< shift_digits
)
2259 x_digits
[x_size
++] = 0;
2260 rem
= v_lshift(x_digits
+ x_size
, a
->ob_digit
, a_size
,
2263 x_digits
[x_size
++] = rem
;
2266 shift_digits
= (a_bits
- DBL_MANT_DIG
- 2) / PyLong_SHIFT
;
2267 shift_bits
= (a_bits
- DBL_MANT_DIG
- 2) % PyLong_SHIFT
;
2268 rem
= v_rshift(x_digits
, a
->ob_digit
+ shift_digits
,
2269 a_size
- shift_digits
, (int)shift_bits
);
2270 x_size
= a_size
- shift_digits
;
2271 /* For correct rounding below, we need the least significant
2272 bit of x to be 'sticky' for this shift: if any of the bits
2273 shifted out was nonzero, we set the least significant bit
2278 while (shift_digits
> 0)
2279 if (a
->ob_digit
[--shift_digits
]) {
2284 assert(1 <= x_size
&&
2285 x_size
<= (Py_ssize_t
)(sizeof(x_digits
)/sizeof(digit
)));
2287 /* Round, and convert to double. */
2288 x_digits
[0] += half_even_correction
[x_digits
[0] & 7];
2289 dx
= x_digits
[--x_size
];
2291 dx
= dx
* PyLong_BASE
+ x_digits
[--x_size
];
2293 /* Rescale; make correction if result is 1.0. */
2294 dx
/= 4.0 * EXP2_DBL_MANT_DIG
;
2296 if (a_bits
== PY_SSIZE_T_MAX
)
2303 return Py_SIZE(a
) < 0 ? -dx
: dx
;
2306 /* exponent > PY_SSIZE_T_MAX */
2307 PyErr_SetString(PyExc_OverflowError
,
2308 "huge integer: number of bits overflows a Py_ssize_t");
2313 /* Get a C double from a long int object. Rounds to the nearest double,
2314 using the round-half-to-even rule in the case of a tie. */
2317 PyLong_AsDouble(PyObject
*v
)
2319 Py_ssize_t exponent
;
2322 if (v
== NULL
|| !PyLong_Check(v
)) {
2323 PyErr_BadInternalCall();
2326 x
= _PyLong_Frexp((PyLongObject
*)v
, &exponent
);
2327 if ((x
== -1.0 && PyErr_Occurred()) || exponent
> DBL_MAX_EXP
) {
2328 PyErr_SetString(PyExc_OverflowError
,
2329 "long int too large to convert to float");
2332 return ldexp(x
, (int)exponent
);
2338 long_dealloc(PyObject
*v
)
2340 Py_TYPE(v
)->tp_free(v
);
2344 long_repr(PyObject
*v
)
2346 return _PyLong_Format(v
, 10, 1, 0);
2350 long_str(PyObject
*v
)
2352 return _PyLong_Format(v
, 10, 0, 0);
2356 long_compare(PyLongObject
*a
, PyLongObject
*b
)
2360 if (Py_SIZE(a
) != Py_SIZE(b
)) {
2361 sign
= Py_SIZE(a
) - Py_SIZE(b
);
2364 Py_ssize_t i
= ABS(Py_SIZE(a
));
2365 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2370 sign
= (sdigit
)a
->ob_digit
[i
] - (sdigit
)b
->ob_digit
[i
];
2375 return sign
< 0 ? -1 : sign
> 0 ? 1 : 0;
2379 long_hash(PyLongObject
*v
)
2385 /* This is designed so that Python ints and longs with the
2386 same value hash to the same value, otherwise comparisons
2387 of mapping keys will turn out weird */
2395 /* The following loop produces a C unsigned long x such that x is
2396 congruent to the absolute value of v modulo ULONG_MAX. The
2397 resulting x is nonzero if and only if v is. */
2399 /* Force a native long #-bits (32 or 64) circular shift */
2400 x
= (x
>> (8*SIZEOF_LONG
-PyLong_SHIFT
)) | (x
<< PyLong_SHIFT
);
2401 x
+= v
->ob_digit
[i
];
2402 /* If the addition above overflowed we compensate by
2403 incrementing. This preserves the value modulo
2405 if (x
< v
->ob_digit
[i
])
2409 if (x
== (unsigned long)-1)
2410 x
= (unsigned long)-2;
2415 /* Add the absolute values of two long integers. */
2417 static PyLongObject
*
2418 x_add(PyLongObject
*a
, PyLongObject
*b
)
2420 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2425 /* Ensure a is the larger of the two: */
2426 if (size_a
< size_b
) {
2427 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2428 { Py_ssize_t size_temp
= size_a
;
2430 size_b
= size_temp
; }
2432 z
= _PyLong_New(size_a
+1);
2435 for (i
= 0; i
< size_b
; ++i
) {
2436 carry
+= a
->ob_digit
[i
] + b
->ob_digit
[i
];
2437 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2438 carry
>>= PyLong_SHIFT
;
2440 for (; i
< size_a
; ++i
) {
2441 carry
+= a
->ob_digit
[i
];
2442 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2443 carry
>>= PyLong_SHIFT
;
2445 z
->ob_digit
[i
] = carry
;
2446 return long_normalize(z
);
2449 /* Subtract the absolute values of two integers. */
2451 static PyLongObject
*
2452 x_sub(PyLongObject
*a
, PyLongObject
*b
)
2454 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2460 /* Ensure a is the larger of the two: */
2461 if (size_a
< size_b
) {
2463 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2464 { Py_ssize_t size_temp
= size_a
;
2466 size_b
= size_temp
; }
2468 else if (size_a
== size_b
) {
2469 /* Find highest digit where a and b differ: */
2471 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2474 return _PyLong_New(0);
2475 if (a
->ob_digit
[i
] < b
->ob_digit
[i
]) {
2477 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2479 size_a
= size_b
= i
+1;
2481 z
= _PyLong_New(size_a
);
2484 for (i
= 0; i
< size_b
; ++i
) {
2485 /* The following assumes unsigned arithmetic
2486 works module 2**N for some N>PyLong_SHIFT. */
2487 borrow
= a
->ob_digit
[i
] - b
->ob_digit
[i
] - borrow
;
2488 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2489 borrow
>>= PyLong_SHIFT
;
2490 borrow
&= 1; /* Keep only one sign bit */
2492 for (; i
< size_a
; ++i
) {
2493 borrow
= a
->ob_digit
[i
] - borrow
;
2494 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2495 borrow
>>= PyLong_SHIFT
;
2496 borrow
&= 1; /* Keep only one sign bit */
2498 assert(borrow
== 0);
2500 z
->ob_size
= -(z
->ob_size
);
2501 return long_normalize(z
);
2505 long_add(PyLongObject
*v
, PyLongObject
*w
)
2507 PyLongObject
*a
, *b
, *z
;
2509 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2511 if (a
->ob_size
< 0) {
2512 if (b
->ob_size
< 0) {
2514 if (z
!= NULL
&& z
->ob_size
!= 0)
2515 z
->ob_size
= -(z
->ob_size
);
2528 return (PyObject
*)z
;
2532 long_sub(PyLongObject
*v
, PyLongObject
*w
)
2534 PyLongObject
*a
, *b
, *z
;
2536 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2538 if (a
->ob_size
< 0) {
2543 if (z
!= NULL
&& z
->ob_size
!= 0)
2544 z
->ob_size
= -(z
->ob_size
);
2554 return (PyObject
*)z
;
2557 /* Grade school multiplication, ignoring the signs.
2558 * Returns the absolute value of the product, or NULL if error.
2560 static PyLongObject
*
2561 x_mul(PyLongObject
*a
, PyLongObject
*b
)
2564 Py_ssize_t size_a
= ABS(Py_SIZE(a
));
2565 Py_ssize_t size_b
= ABS(Py_SIZE(b
));
2568 z
= _PyLong_New(size_a
+ size_b
);
2572 memset(z
->ob_digit
, 0, Py_SIZE(z
) * sizeof(digit
));
2574 /* Efficient squaring per HAC, Algorithm 14.16:
2575 * http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
2576 * Gives slightly less than a 2x speedup when a == b,
2577 * via exploiting that each entry in the multiplication
2578 * pyramid appears twice (except for the size_a squares).
2580 for (i
= 0; i
< size_a
; ++i
) {
2582 twodigits f
= a
->ob_digit
[i
];
2583 digit
*pz
= z
->ob_digit
+ (i
<< 1);
2584 digit
*pa
= a
->ob_digit
+ i
+ 1;
2585 digit
*paend
= a
->ob_digit
+ size_a
;
2592 carry
= *pz
+ f
* f
;
2593 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2594 carry
>>= PyLong_SHIFT
;
2595 assert(carry
<= PyLong_MASK
);
2597 /* Now f is added in twice in each column of the
2598 * pyramid it appears. Same as adding f<<1 once.
2601 while (pa
< paend
) {
2602 carry
+= *pz
+ *pa
++ * f
;
2603 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2604 carry
>>= PyLong_SHIFT
;
2605 assert(carry
<= (PyLong_MASK
<< 1));
2609 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2610 carry
>>= PyLong_SHIFT
;
2613 *pz
+= (digit
)(carry
& PyLong_MASK
);
2614 assert((carry
>> PyLong_SHIFT
) == 0);
2617 else { /* a is not the same as b -- gradeschool long mult */
2618 for (i
= 0; i
< size_a
; ++i
) {
2619 twodigits carry
= 0;
2620 twodigits f
= a
->ob_digit
[i
];
2621 digit
*pz
= z
->ob_digit
+ i
;
2622 digit
*pb
= b
->ob_digit
;
2623 digit
*pbend
= b
->ob_digit
+ size_b
;
2630 while (pb
< pbend
) {
2631 carry
+= *pz
+ *pb
++ * f
;
2632 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2633 carry
>>= PyLong_SHIFT
;
2634 assert(carry
<= PyLong_MASK
);
2637 *pz
+= (digit
)(carry
& PyLong_MASK
);
2638 assert((carry
>> PyLong_SHIFT
) == 0);
2641 return long_normalize(z
);
2644 /* A helper for Karatsuba multiplication (k_mul).
2645 Takes a long "n" and an integer "size" representing the place to
2646 split, and sets low and high such that abs(n) == (high << size) + low,
2647 viewing the shift as being by digits. The sign bit is ignored, and
2648 the return values are >= 0.
2649 Returns 0 on success, -1 on failure.
2652 kmul_split(PyLongObject
*n
,
2654 PyLongObject
**high
,
2657 PyLongObject
*hi
, *lo
;
2658 Py_ssize_t size_lo
, size_hi
;
2659 const Py_ssize_t size_n
= ABS(Py_SIZE(n
));
2661 size_lo
= MIN(size_n
, size
);
2662 size_hi
= size_n
- size_lo
;
2664 if ((hi
= _PyLong_New(size_hi
)) == NULL
)
2666 if ((lo
= _PyLong_New(size_lo
)) == NULL
) {
2671 memcpy(lo
->ob_digit
, n
->ob_digit
, size_lo
* sizeof(digit
));
2672 memcpy(hi
->ob_digit
, n
->ob_digit
+ size_lo
, size_hi
* sizeof(digit
));
2674 *high
= long_normalize(hi
);
2675 *low
= long_normalize(lo
);
2679 static PyLongObject
*k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
);
2681 /* Karatsuba multiplication. Ignores the input signs, and returns the
2682 * absolute value of the product (or NULL if error).
2683 * See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
2685 static PyLongObject
*
2686 k_mul(PyLongObject
*a
, PyLongObject
*b
)
2688 Py_ssize_t asize
= ABS(Py_SIZE(a
));
2689 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2690 PyLongObject
*ah
= NULL
;
2691 PyLongObject
*al
= NULL
;
2692 PyLongObject
*bh
= NULL
;
2693 PyLongObject
*bl
= NULL
;
2694 PyLongObject
*ret
= NULL
;
2695 PyLongObject
*t1
, *t2
, *t3
;
2696 Py_ssize_t shift
; /* the number of digits we split off */
2699 /* (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
2700 * Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
2701 * Then the original product is
2702 * ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
2703 * By picking X to be a power of 2, "*X" is just shifting, and it's
2704 * been reduced to 3 multiplies on numbers half the size.
2707 /* We want to split based on the larger number; fiddle so that b
2710 if (asize
> bsize
) {
2720 /* Use gradeschool math when either number is too small. */
2721 i
= a
== b
? KARATSUBA_SQUARE_CUTOFF
: KARATSUBA_CUTOFF
;
2724 return _PyLong_New(0);
2729 /* If a is small compared to b, splitting on b gives a degenerate
2730 * case with ah==0, and Karatsuba may be (even much) less efficient
2731 * than "grade school" then. However, we can still win, by viewing
2732 * b as a string of "big digits", each of width a->ob_size. That
2733 * leads to a sequence of balanced calls to k_mul.
2735 if (2 * asize
<= bsize
)
2736 return k_lopsided_mul(a
, b
);
2738 /* Split a & b into hi & lo pieces. */
2740 if (kmul_split(a
, shift
, &ah
, &al
) < 0) goto fail
;
2741 assert(Py_SIZE(ah
) > 0); /* the split isn't degenerate */
2749 else if (kmul_split(b
, shift
, &bh
, &bl
) < 0) goto fail
;
2752 * 1. Allocate result space (asize + bsize digits: that's always
2754 * 2. Compute ah*bh, and copy into result at 2*shift.
2755 * 3. Compute al*bl, and copy into result at 0. Note that this
2756 * can't overlap with #2.
2757 * 4. Subtract al*bl from the result, starting at shift. This may
2758 * underflow (borrow out of the high digit), but we don't care:
2759 * we're effectively doing unsigned arithmetic mod
2760 * PyLong_BASE**(sizea + sizeb), and so long as the *final* result fits,
2761 * borrows and carries out of the high digit can be ignored.
2762 * 5. Subtract ah*bh from the result, starting at shift.
2763 * 6. Compute (ah+al)*(bh+bl), and add it into the result starting
2767 /* 1. Allocate result space. */
2768 ret
= _PyLong_New(asize
+ bsize
);
2769 if (ret
== NULL
) goto fail
;
2771 /* Fill with trash, to catch reference to uninitialized digits. */
2772 memset(ret
->ob_digit
, 0xDF, Py_SIZE(ret
) * sizeof(digit
));
2775 /* 2. t1 <- ah*bh, and copy into high digits of result. */
2776 if ((t1
= k_mul(ah
, bh
)) == NULL
) goto fail
;
2777 assert(Py_SIZE(t1
) >= 0);
2778 assert(2*shift
+ Py_SIZE(t1
) <= Py_SIZE(ret
));
2779 memcpy(ret
->ob_digit
+ 2*shift
, t1
->ob_digit
,
2780 Py_SIZE(t1
) * sizeof(digit
));
2782 /* Zero-out the digits higher than the ah*bh copy. */
2783 i
= Py_SIZE(ret
) - 2*shift
- Py_SIZE(t1
);
2785 memset(ret
->ob_digit
+ 2*shift
+ Py_SIZE(t1
), 0,
2788 /* 3. t2 <- al*bl, and copy into the low digits. */
2789 if ((t2
= k_mul(al
, bl
)) == NULL
) {
2793 assert(Py_SIZE(t2
) >= 0);
2794 assert(Py_SIZE(t2
) <= 2*shift
); /* no overlap with high digits */
2795 memcpy(ret
->ob_digit
, t2
->ob_digit
, Py_SIZE(t2
) * sizeof(digit
));
2797 /* Zero out remaining digits. */
2798 i
= 2*shift
- Py_SIZE(t2
); /* number of uninitialized digits */
2800 memset(ret
->ob_digit
+ Py_SIZE(t2
), 0, i
* sizeof(digit
));
2802 /* 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
2803 * because it's fresher in cache.
2805 i
= Py_SIZE(ret
) - shift
; /* # digits after shift */
2806 (void)v_isub(ret
->ob_digit
+ shift
, i
, t2
->ob_digit
, Py_SIZE(t2
));
2809 (void)v_isub(ret
->ob_digit
+ shift
, i
, t1
->ob_digit
, Py_SIZE(t1
));
2812 /* 6. t3 <- (ah+al)(bh+bl), and add into result. */
2813 if ((t1
= x_add(ah
, al
)) == NULL
) goto fail
;
2822 else if ((t2
= x_add(bh
, bl
)) == NULL
) {
2833 if (t3
== NULL
) goto fail
;
2834 assert(Py_SIZE(t3
) >= 0);
2836 /* Add t3. It's not obvious why we can't run out of room here.
2837 * See the (*) comment after this function.
2839 (void)v_iadd(ret
->ob_digit
+ shift
, i
, t3
->ob_digit
, Py_SIZE(t3
));
2842 return long_normalize(ret
);
2853 /* (*) Why adding t3 can't "run out of room" above.
2855 Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
2858 1. For any integer i, i = c(i/2) + f(i/2). In particular,
2859 bsize = c(bsize/2) + f(bsize/2).
2860 2. shift = f(bsize/2)
2862 4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
2863 routine, so asize > bsize/2 >= f(bsize/2) in this routine.
2865 We allocated asize + bsize result digits, and add t3 into them at an offset
2866 of shift. This leaves asize+bsize-shift allocated digit positions for t3
2867 to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
2868 asize + c(bsize/2) available digit positions.
2870 bh has c(bsize/2) digits, and bl at most f(bsize/2) digits. So bh+bl has
2871 at most c(bsize/2) digits + 1 bit.
2873 If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
2874 digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
2875 most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
2877 The product (ah+al)*(bh+bl) therefore has at most
2879 c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
2881 and we have asize + c(bsize/2) available digit positions. We need to show
2882 this is always enough. An instance of c(bsize/2) cancels out in both, so
2883 the question reduces to whether asize digits is enough to hold
2884 (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
2885 then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
2886 asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
2887 digit is enough to hold 2 bits. This is so since PyLong_SHIFT=15 >= 2. If
2888 asize == bsize, then we're asking whether bsize digits is enough to hold
2889 c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
2890 is enough to hold 2 bits. This is so if bsize >= 2, which holds because
2891 bsize >= KARATSUBA_CUTOFF >= 2.
2893 Note that since there's always enough room for (ah+al)*(bh+bl), and that's
2894 clearly >= each of ah*bh and al*bl, there's always enough room to subtract
2895 ah*bh and al*bl too.
2898 /* b has at least twice the digits of a, and a is big enough that Karatsuba
2899 * would pay off *if* the inputs had balanced sizes. View b as a sequence
2900 * of slices, each with a->ob_size digits, and multiply the slices by a,
2901 * one at a time. This gives k_mul balanced inputs to work with, and is
2902 * also cache-friendly (we compute one double-width slice of the result
2903 * at a time, then move on, never backtracking except for the helpful
2904 * single-width slice overlap between successive partial sums).
2906 static PyLongObject
*
2907 k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
)
2909 const Py_ssize_t asize
= ABS(Py_SIZE(a
));
2910 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2911 Py_ssize_t nbdone
; /* # of b digits already multiplied */
2913 PyLongObject
*bslice
= NULL
;
2915 assert(asize
> KARATSUBA_CUTOFF
);
2916 assert(2 * asize
<= bsize
);
2918 /* Allocate result space, and zero it out. */
2919 ret
= _PyLong_New(asize
+ bsize
);
2922 memset(ret
->ob_digit
, 0, Py_SIZE(ret
) * sizeof(digit
));
2924 /* Successive slices of b are copied into bslice. */
2925 bslice
= _PyLong_New(asize
);
2931 PyLongObject
*product
;
2932 const Py_ssize_t nbtouse
= MIN(bsize
, asize
);
2934 /* Multiply the next slice of b by a. */
2935 memcpy(bslice
->ob_digit
, b
->ob_digit
+ nbdone
,
2936 nbtouse
* sizeof(digit
));
2937 Py_SIZE(bslice
) = nbtouse
;
2938 product
= k_mul(a
, bslice
);
2939 if (product
== NULL
)
2942 /* Add into result. */
2943 (void)v_iadd(ret
->ob_digit
+ nbdone
, Py_SIZE(ret
) - nbdone
,
2944 product
->ob_digit
, Py_SIZE(product
));
2952 return long_normalize(ret
);
2961 long_mul(PyLongObject
*v
, PyLongObject
*w
)
2963 PyLongObject
*a
, *b
, *z
;
2965 if (!convert_binop((PyObject
*)v
, (PyObject
*)w
, &a
, &b
)) {
2966 Py_INCREF(Py_NotImplemented
);
2967 return Py_NotImplemented
;
2971 /* Negate if exactly one of the inputs is negative. */
2972 if (((a
->ob_size
^ b
->ob_size
) < 0) && z
)
2973 z
->ob_size
= -(z
->ob_size
);
2976 return (PyObject
*)z
;
2979 /* The / and % operators are now defined in terms of divmod().
2980 The expression a mod b has the value a - b*floor(a/b).
2981 The long_divrem function gives the remainder after division of
2982 |a| by |b|, with the sign of a. This is also expressed
2983 as a - b*trunc(a/b), if trunc truncates towards zero.
2990 So, to get from rem to mod, we have to add b if a and b
2991 have different signs. We then subtract one from the 'div'
2992 part of the outcome to keep the invariant intact. */
2995 * *pdiv, *pmod = divmod(v, w)
2996 * NULL can be passed for pdiv or pmod, in which case that part of
2997 * the result is simply thrown away. The caller owns a reference to
2998 * each of these it requests (does not pass NULL for).
3001 l_divmod(PyLongObject
*v
, PyLongObject
*w
,
3002 PyLongObject
**pdiv
, PyLongObject
**pmod
)
3004 PyLongObject
*div
, *mod
;
3006 if (long_divrem(v
, w
, &div
, &mod
) < 0)
3008 if ((Py_SIZE(mod
) < 0 && Py_SIZE(w
) > 0) ||
3009 (Py_SIZE(mod
) > 0 && Py_SIZE(w
) < 0)) {
3012 temp
= (PyLongObject
*) long_add(mod
, w
);
3019 one
= (PyLongObject
*) PyLong_FromLong(1L);
3021 (temp
= (PyLongObject
*) long_sub(div
, one
)) == NULL
) {
3045 long_div(PyObject
*v
, PyObject
*w
)
3047 PyLongObject
*a
, *b
, *div
;
3049 CONVERT_BINOP(v
, w
, &a
, &b
);
3050 if (l_divmod(a
, b
, &div
, NULL
) < 0)
3054 return (PyObject
*)div
;
3058 long_classic_div(PyObject
*v
, PyObject
*w
)
3060 PyLongObject
*a
, *b
, *div
;
3062 CONVERT_BINOP(v
, w
, &a
, &b
);
3063 if (Py_DivisionWarningFlag
&&
3064 PyErr_Warn(PyExc_DeprecationWarning
, "classic long division") < 0)
3066 else if (l_divmod(a
, b
, &div
, NULL
) < 0)
3070 return (PyObject
*)div
;
3073 /* PyLong/PyLong -> float, with correctly rounded result. */
3075 #define MANT_DIG_DIGITS (DBL_MANT_DIG / PyLong_SHIFT)
3076 #define MANT_DIG_BITS (DBL_MANT_DIG % PyLong_SHIFT)
3079 long_true_divide(PyObject
*v
, PyObject
*w
)
3081 PyLongObject
*a
, *b
, *x
;
3082 Py_ssize_t a_size
, b_size
, shift
, extra_bits
, diff
, x_size
, x_bits
;
3084 int inexact
, negate
, a_is_small
, b_is_small
;
3087 CONVERT_BINOP(v
, w
, &a
, &b
);
3090 Method in a nutshell:
3092 0. reduce to case a, b > 0; filter out obvious underflow/overflow
3093 1. choose a suitable integer 'shift'
3094 2. use integer arithmetic to compute x = floor(2**-shift*a/b)
3095 3. adjust x for correct rounding
3096 4. convert x to a double dx with the same value
3097 5. return ldexp(dx, shift).
3101 0. For any a, a/0 raises ZeroDivisionError; for nonzero b, 0/b
3102 returns either 0.0 or -0.0, depending on the sign of b. For a and
3103 b both nonzero, ignore signs of a and b, and add the sign back in
3104 at the end. Now write a_bits and b_bits for the bit lengths of a
3105 and b respectively (that is, a_bits = 1 + floor(log_2(a)); likewise
3108 2**(a_bits - b_bits - 1) < a/b < 2**(a_bits - b_bits + 1).
3110 So if a_bits - b_bits > DBL_MAX_EXP then a/b > 2**DBL_MAX_EXP and
3111 so overflows. Similarly, if a_bits - b_bits < DBL_MIN_EXP -
3112 DBL_MANT_DIG - 1 then a/b underflows to 0. With these cases out of
3113 the way, we can assume that
3115 DBL_MIN_EXP - DBL_MANT_DIG - 1 <= a_bits - b_bits <= DBL_MAX_EXP.
3117 1. The integer 'shift' is chosen so that x has the right number of
3118 bits for a double, plus two or three extra bits that will be used
3119 in the rounding decisions. Writing a_bits and b_bits for the
3120 number of significant bits in a and b respectively, a
3121 straightforward formula for shift is:
3123 shift = a_bits - b_bits - DBL_MANT_DIG - 2
3125 This is fine in the usual case, but if a/b is smaller than the
3126 smallest normal float then it can lead to double rounding on an
3127 IEEE 754 platform, giving incorrectly rounded results. So we
3128 adjust the formula slightly. The actual formula used is:
3130 shift = MAX(a_bits - b_bits, DBL_MIN_EXP) - DBL_MANT_DIG - 2
3132 2. The quantity x is computed by first shifting a (left -shift bits
3133 if shift <= 0, right shift bits if shift > 0) and then dividing by
3134 b. For both the shift and the division, we keep track of whether
3135 the result is inexact, in a flag 'inexact'; this information is
3136 needed at the rounding stage.
3138 With the choice of shift above, together with our assumption that
3139 a_bits - b_bits >= DBL_MIN_EXP - DBL_MANT_DIG - 1, it follows
3142 3. Now x * 2**shift <= a/b < (x+1) * 2**shift. We want to replace
3143 this with an exactly representable float of the form
3145 round(x/2**extra_bits) * 2**(extra_bits+shift).
3147 For float representability, we need x/2**extra_bits <
3148 2**DBL_MANT_DIG and extra_bits + shift >= DBL_MIN_EXP -
3149 DBL_MANT_DIG. This translates to the condition:
3151 extra_bits >= MAX(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG
3153 To round, we just modify the bottom digit of x in-place; this can
3154 end up giving a digit with value > PyLONG_MASK, but that's not a
3155 problem since digits can hold values up to 2*PyLONG_MASK+1.
3157 With the original choices for shift above, extra_bits will always
3158 be 2 or 3. Then rounding under the round-half-to-even rule, we
3159 round up iff the most significant of the extra bits is 1, and
3160 either: (a) the computation of x in step 2 had an inexact result,
3161 or (b) at least one other of the extra bits is 1, or (c) the least
3162 significant bit of x (above those to be rounded) is 1.
3164 4. Conversion to a double is straightforward; all floating-point
3165 operations involved in the conversion are exact, so there's no
3166 danger of rounding errors.
3168 5. Use ldexp(x, shift) to compute x*2**shift, the final result.
3169 The result will always be exactly representable as a double, except
3170 in the case that it overflows. To avoid dependence on the exact
3171 behaviour of ldexp on overflow, we check for overflow before
3172 applying ldexp. The result of ldexp is adjusted for sign before
3176 /* Reduce to case where a and b are both positive. */
3177 a_size
= ABS(Py_SIZE(a
));
3178 b_size
= ABS(Py_SIZE(b
));
3179 negate
= (Py_SIZE(a
) < 0) ^ (Py_SIZE(b
) < 0);
3181 PyErr_SetString(PyExc_ZeroDivisionError
,
3182 "division by zero");
3186 goto underflow_or_zero
;
3188 /* Fast path for a and b small (exactly representable in a double).
3189 Relies on floating-point division being correctly rounded; results
3190 may be subject to double rounding on x86 machines that operate with
3191 the x87 FPU set to 64-bit precision. */
3192 a_is_small
= a_size
<= MANT_DIG_DIGITS
||
3193 (a_size
== MANT_DIG_DIGITS
+1 &&
3194 a
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3195 b_is_small
= b_size
<= MANT_DIG_DIGITS
||
3196 (b_size
== MANT_DIG_DIGITS
+1 &&
3197 b
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3198 if (a_is_small
&& b_is_small
) {
3200 da
= a
->ob_digit
[--a_size
];
3202 da
= da
* PyLong_BASE
+ a
->ob_digit
[--a_size
];
3203 db
= b
->ob_digit
[--b_size
];
3205 db
= db
* PyLong_BASE
+ b
->ob_digit
[--b_size
];
3210 /* Catch obvious cases of underflow and overflow */
3211 diff
= a_size
- b_size
;
3212 if (diff
> PY_SSIZE_T_MAX
/PyLong_SHIFT
- 1)
3213 /* Extreme overflow */
3215 else if (diff
< 1 - PY_SSIZE_T_MAX
/PyLong_SHIFT
)
3216 /* Extreme underflow */
3217 goto underflow_or_zero
;
3218 /* Next line is now safe from overflowing a Py_ssize_t */
3219 diff
= diff
* PyLong_SHIFT
+ bits_in_digit(a
->ob_digit
[a_size
- 1]) -
3220 bits_in_digit(b
->ob_digit
[b_size
- 1]);
3221 /* Now diff = a_bits - b_bits. */
3222 if (diff
> DBL_MAX_EXP
)
3224 else if (diff
< DBL_MIN_EXP
- DBL_MANT_DIG
- 1)
3225 goto underflow_or_zero
;
3227 /* Choose value for shift; see comments for step 1 above. */
3228 shift
= MAX(diff
, DBL_MIN_EXP
) - DBL_MANT_DIG
- 2;
3232 /* x = abs(a * 2**-shift) */
3234 Py_ssize_t i
, shift_digits
= -shift
/ PyLong_SHIFT
;
3236 /* x = a << -shift */
3237 if (a_size
>= PY_SSIZE_T_MAX
- 1 - shift_digits
) {
3238 /* In practice, it's probably impossible to end up
3239 here. Both a and b would have to be enormous,
3240 using close to SIZE_T_MAX bytes of memory each. */
3241 PyErr_SetString(PyExc_OverflowError
,
3242 "intermediate overflow during division");
3245 x
= _PyLong_New(a_size
+ shift_digits
+ 1);
3248 for (i
= 0; i
< shift_digits
; i
++)
3250 rem
= v_lshift(x
->ob_digit
+ shift_digits
, a
->ob_digit
,
3251 a_size
, -shift
% PyLong_SHIFT
);
3252 x
->ob_digit
[a_size
+ shift_digits
] = rem
;
3255 Py_ssize_t shift_digits
= shift
/ PyLong_SHIFT
;
3257 /* x = a >> shift */
3258 assert(a_size
>= shift_digits
);
3259 x
= _PyLong_New(a_size
- shift_digits
);
3262 rem
= v_rshift(x
->ob_digit
, a
->ob_digit
+ shift_digits
,
3263 a_size
- shift_digits
, shift
% PyLong_SHIFT
);
3264 /* set inexact if any of the bits shifted out is nonzero */
3267 while (!inexact
&& shift_digits
> 0)
3268 if (a
->ob_digit
[--shift_digits
])
3272 x_size
= Py_SIZE(x
);
3274 /* x //= b. If the remainder is nonzero, set inexact. We own the only
3275 reference to x, so it's safe to modify it in-place. */
3277 digit rem
= inplace_divrem1(x
->ob_digit
, x
->ob_digit
, x_size
,
3284 PyLongObject
*div
, *rem
;
3285 div
= x_divrem(x
, b
, &rem
);
3294 x_size
= ABS(Py_SIZE(x
));
3295 assert(x_size
> 0); /* result of division is never zero */
3296 x_bits
= (x_size
-1)*PyLong_SHIFT
+bits_in_digit(x
->ob_digit
[x_size
-1]);
3298 /* The number of extra bits that have to be rounded away. */
3299 extra_bits
= MAX(x_bits
, DBL_MIN_EXP
- shift
) - DBL_MANT_DIG
;
3300 assert(extra_bits
== 2 || extra_bits
== 3);
3302 /* Round by directly modifying the low digit of x. */
3303 mask
= (digit
)1 << (extra_bits
- 1);
3304 low
= x
->ob_digit
[0] | inexact
;
3305 if (low
& mask
&& low
& (3*mask
-1))
3307 x
->ob_digit
[0] = low
& ~(mask
-1U);
3309 /* Convert x to a double dx; the conversion is exact. */
3310 dx
= x
->ob_digit
[--x_size
];
3312 dx
= dx
* PyLong_BASE
+ x
->ob_digit
[--x_size
];
3315 /* Check whether ldexp result will overflow a double. */
3316 if (shift
+ x_bits
>= DBL_MAX_EXP
&&
3317 (shift
+ x_bits
> DBL_MAX_EXP
|| dx
== ldexp(1.0, (int)x_bits
)))
3319 result
= ldexp(dx
, (int)shift
);
3324 return PyFloat_FromDouble(negate
? -result
: result
);
3329 return PyFloat_FromDouble(negate
? -0.0 : 0.0);
3332 PyErr_SetString(PyExc_OverflowError
,
3333 "integer division result too large for a float");
3341 long_mod(PyObject
*v
, PyObject
*w
)
3343 PyLongObject
*a
, *b
, *mod
;
3345 CONVERT_BINOP(v
, w
, &a
, &b
);
3347 if (l_divmod(a
, b
, NULL
, &mod
) < 0)
3351 return (PyObject
*)mod
;
3355 long_divmod(PyObject
*v
, PyObject
*w
)
3357 PyLongObject
*a
, *b
, *div
, *mod
;
3360 CONVERT_BINOP(v
, w
, &a
, &b
);
3362 if (l_divmod(a
, b
, &div
, &mod
) < 0) {
3369 PyTuple_SetItem(z
, 0, (PyObject
*) div
);
3370 PyTuple_SetItem(z
, 1, (PyObject
*) mod
);
3383 long_pow(PyObject
*v
, PyObject
*w
, PyObject
*x
)
3385 PyLongObject
*a
, *b
, *c
; /* a,b,c = v,w,x */
3386 int negativeOutput
= 0; /* if x<0 return negative output */
3388 PyLongObject
*z
= NULL
; /* accumulated result */
3389 Py_ssize_t i
, j
, k
; /* counters */
3390 PyLongObject
*temp
= NULL
;
3392 /* 5-ary values. If the exponent is large enough, table is
3393 * precomputed so that table[i] == a**i % c for i in range(32).
3395 PyLongObject
*table
[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3396 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3398 /* a, b, c = v, w, x */
3399 CONVERT_BINOP(v
, w
, &a
, &b
);
3400 if (PyLong_Check(x
)) {
3401 c
= (PyLongObject
*)x
;
3404 else if (PyInt_Check(x
)) {
3405 c
= (PyLongObject
*)PyLong_FromLong(PyInt_AS_LONG(x
));
3409 else if (x
== Py_None
)
3414 Py_INCREF(Py_NotImplemented
);
3415 return Py_NotImplemented
;
3418 if (Py_SIZE(b
) < 0) { /* if exponent is negative */
3420 PyErr_SetString(PyExc_TypeError
, "pow() 2nd argument "
3421 "cannot be negative when 3rd argument specified");
3425 /* else return a float. This works because we know
3426 that this calls float_pow() which converts its
3427 arguments to double. */
3430 return PyFloat_Type
.tp_as_number
->nb_power(v
, w
, x
);
3436 raise ValueError() */
3437 if (Py_SIZE(c
) == 0) {
3438 PyErr_SetString(PyExc_ValueError
,
3439 "pow() 3rd argument cannot be 0");
3444 negativeOutput = True
3445 modulus = -modulus */
3446 if (Py_SIZE(c
) < 0) {
3448 temp
= (PyLongObject
*)_PyLong_Copy(c
);
3454 c
->ob_size
= - c
->ob_size
;
3459 if ((Py_SIZE(c
) == 1) && (c
->ob_digit
[0] == 1)) {
3460 z
= (PyLongObject
*)PyLong_FromLong(0L);
3465 base = base % modulus
3466 Having the base positive just makes things easier. */
3467 if (Py_SIZE(a
) < 0) {
3468 if (l_divmod(a
, c
, NULL
, &temp
) < 0)
3476 /* At this point a, b, and c are guaranteed non-negative UNLESS
3477 c is NULL, in which case a may be negative. */
3479 z
= (PyLongObject
*)PyLong_FromLong(1L);
3483 /* Perform a modular reduction, X = X % c, but leave X alone if c
3489 if (l_divmod(X, c, NULL, &temp) < 0) \
3497 /* Multiply two values, then reduce the result:
3498 result = X*Y % c. If c is NULL, skip the mod. */
3499 #define MULT(X, Y, result) \
3501 temp = (PyLongObject *)long_mul(X, Y); \
3504 Py_XDECREF(result); \
3510 if (Py_SIZE(b
) <= FIVEARY_CUTOFF
) {
3511 /* Left-to-right binary exponentiation (HAC Algorithm 14.79) */
3512 /* http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf */
3513 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3514 digit bi
= b
->ob_digit
[i
];
3516 for (j
= (digit
)1 << (PyLong_SHIFT
-1); j
!= 0; j
>>= 1) {
3524 /* Left-to-right 5-ary exponentiation (HAC Algorithm 14.82) */
3525 Py_INCREF(z
); /* still holds 1L */
3527 for (i
= 1; i
< 32; ++i
)
3528 MULT(table
[i
-1], a
, table
[i
]);
3530 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3531 const digit bi
= b
->ob_digit
[i
];
3533 for (j
= PyLong_SHIFT
- 5; j
>= 0; j
-= 5) {
3534 const int index
= (bi
>> j
) & 0x1f;
3535 for (k
= 0; k
< 5; ++k
)
3538 MULT(z
, table
[index
], z
);
3543 if (negativeOutput
&& (Py_SIZE(z
) != 0)) {
3544 temp
= (PyLongObject
*)long_sub(z
, c
);
3560 if (Py_SIZE(b
) > FIVEARY_CUTOFF
) {
3561 for (i
= 0; i
< 32; ++i
)
3562 Py_XDECREF(table
[i
]);
3568 return (PyObject
*)z
;
3572 long_invert(PyLongObject
*v
)
3574 /* Implement ~x as -(x+1) */
3577 w
= (PyLongObject
*)PyLong_FromLong(1L);
3580 x
= (PyLongObject
*) long_add(v
, w
);
3584 Py_SIZE(x
) = -(Py_SIZE(x
));
3585 return (PyObject
*)x
;
3589 long_neg(PyLongObject
*v
)
3592 if (v
->ob_size
== 0 && PyLong_CheckExact(v
)) {
3595 return (PyObject
*) v
;
3597 z
= (PyLongObject
*)_PyLong_Copy(v
);
3599 z
->ob_size
= -(v
->ob_size
);
3600 return (PyObject
*)z
;
3604 long_abs(PyLongObject
*v
)
3609 return long_long((PyObject
*)v
);
3613 long_nonzero(PyLongObject
*v
)
3615 return Py_SIZE(v
) != 0;
3619 long_rshift(PyLongObject
*v
, PyLongObject
*w
)
3621 PyLongObject
*a
, *b
;
3622 PyLongObject
*z
= NULL
;
3623 Py_ssize_t shiftby
, newsize
, wordshift
, loshift
, hishift
, i
, j
;
3624 digit lomask
, himask
;
3626 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
3628 if (Py_SIZE(a
) < 0) {
3629 /* Right shifting negative numbers is harder */
3630 PyLongObject
*a1
, *a2
;
3631 a1
= (PyLongObject
*) long_invert(a
);
3634 a2
= (PyLongObject
*) long_rshift(a1
, b
);
3638 z
= (PyLongObject
*) long_invert(a2
);
3642 shiftby
= PyLong_AsSsize_t((PyObject
*)b
);
3643 if (shiftby
== -1L && PyErr_Occurred())
3646 PyErr_SetString(PyExc_ValueError
,
3647 "negative shift count");
3650 wordshift
= shiftby
/ PyLong_SHIFT
;
3651 newsize
= ABS(Py_SIZE(a
)) - wordshift
;
3656 return (PyObject
*)z
;
3658 loshift
= shiftby
% PyLong_SHIFT
;
3659 hishift
= PyLong_SHIFT
- loshift
;
3660 lomask
= ((digit
)1 << hishift
) - 1;
3661 himask
= PyLong_MASK
^ lomask
;
3662 z
= _PyLong_New(newsize
);
3666 Py_SIZE(z
) = -(Py_SIZE(z
));
3667 for (i
= 0, j
= wordshift
; i
< newsize
; i
++, j
++) {
3668 z
->ob_digit
[i
] = (a
->ob_digit
[j
] >> loshift
) & lomask
;
3670 z
->ob_digit
[i
] |= (a
->ob_digit
[j
+1] << hishift
) & himask
;
3672 z
= long_normalize(z
);
3677 return (PyObject
*) z
;
3682 long_lshift(PyObject
*v
, PyObject
*w
)
3684 /* This version due to Tim Peters */
3685 PyLongObject
*a
, *b
;
3686 PyLongObject
*z
= NULL
;
3687 Py_ssize_t shiftby
, oldsize
, newsize
, wordshift
, remshift
, i
, j
;
3690 CONVERT_BINOP(v
, w
, &a
, &b
);
3692 shiftby
= PyLong_AsSsize_t((PyObject
*)b
);
3693 if (shiftby
== -1L && PyErr_Occurred())
3696 PyErr_SetString(PyExc_ValueError
, "negative shift count");
3699 /* wordshift, remshift = divmod(shiftby, PyLong_SHIFT) */
3700 wordshift
= shiftby
/ PyLong_SHIFT
;
3701 remshift
= shiftby
- wordshift
* PyLong_SHIFT
;
3703 oldsize
= ABS(a
->ob_size
);
3704 newsize
= oldsize
+ wordshift
;
3707 z
= _PyLong_New(newsize
);
3711 z
->ob_size
= -(z
->ob_size
);
3712 for (i
= 0; i
< wordshift
; i
++)
3715 for (i
= wordshift
, j
= 0; j
< oldsize
; i
++, j
++) {
3716 accum
|= (twodigits
)a
->ob_digit
[j
] << remshift
;
3717 z
->ob_digit
[i
] = (digit
)(accum
& PyLong_MASK
);
3718 accum
>>= PyLong_SHIFT
;
3721 z
->ob_digit
[newsize
-1] = (digit
)accum
;
3724 z
= long_normalize(z
);
3728 return (PyObject
*) z
;
3731 /* Compute two's complement of digit vector a[0:m], writing result to
3732 z[0:m]. The digit vector a need not be normalized, but should not
3733 be entirely zero. a and z may point to the same digit vector. */
3736 v_complement(digit
*z
, digit
*a
, Py_ssize_t m
)
3740 for (i
= 0; i
< m
; ++i
) {
3741 carry
+= a
[i
] ^ PyLong_MASK
;
3742 z
[i
] = carry
& PyLong_MASK
;
3743 carry
>>= PyLong_SHIFT
;
3748 /* Bitwise and/xor/or operations */
3751 long_bitwise(PyLongObject
*a
,
3752 int op
, /* '&', '|', '^' */
3755 int nega
, negb
, negz
;
3756 Py_ssize_t size_a
, size_b
, size_z
, i
;
3759 /* Bitwise operations for negative numbers operate as though
3760 on a two's complement representation. So convert arguments
3761 from sign-magnitude to two's complement, and convert the
3762 result back to sign-magnitude at the end. */
3764 /* If a is negative, replace it by its two's complement. */
3765 size_a
= ABS(Py_SIZE(a
));
3766 nega
= Py_SIZE(a
) < 0;
3768 z
= _PyLong_New(size_a
);
3771 v_complement(z
->ob_digit
, a
->ob_digit
, size_a
);
3775 /* Keep reference count consistent. */
3779 size_b
= ABS(Py_SIZE(b
));
3780 negb
= Py_SIZE(b
) < 0;
3782 z
= _PyLong_New(size_b
);
3787 v_complement(z
->ob_digit
, b
->ob_digit
, size_b
);
3793 /* Swap a and b if necessary to ensure size_a >= size_b. */
3794 if (size_a
< size_b
) {
3795 z
= a
; a
= b
; b
= z
;
3796 size_z
= size_a
; size_a
= size_b
; size_b
= size_z
;
3797 negz
= nega
; nega
= negb
; negb
= negz
;
3800 /* JRH: The original logic here was to allocate the result value (z)
3801 as the longer of the two operands. However, there are some cases
3802 where the result is guaranteed to be shorter than that: AND of two
3803 positives, OR of two negatives: use the shorter number. AND with
3804 mixed signs: use the positive number. OR with mixed signs: use the
3814 size_z
= negb
? size_a
: size_b
;
3818 size_z
= negb
? size_b
: size_a
;
3821 PyErr_BadArgument();
3825 /* We allow an extra digit if z is negative, to make sure that
3826 the final two's complement of z doesn't overflow. */
3827 z
= _PyLong_New(size_z
+ negz
);
3834 /* Compute digits for overlap of a and b. */
3837 for (i
= 0; i
< size_b
; ++i
)
3838 z
->ob_digit
[i
] = a
->ob_digit
[i
] & b
->ob_digit
[i
];
3841 for (i
= 0; i
< size_b
; ++i
)
3842 z
->ob_digit
[i
] = a
->ob_digit
[i
] | b
->ob_digit
[i
];
3845 for (i
= 0; i
< size_b
; ++i
)
3846 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ b
->ob_digit
[i
];
3849 PyErr_BadArgument();
3853 /* Copy any remaining digits of a, inverting if necessary. */
3854 if (op
== '^' && negb
)
3855 for (; i
< size_z
; ++i
)
3856 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ PyLong_MASK
;
3857 else if (i
< size_z
)
3858 memcpy(&z
->ob_digit
[i
], &a
->ob_digit
[i
],
3859 (size_z
-i
)*sizeof(digit
));
3861 /* Complement result if negative. */
3863 Py_SIZE(z
) = -(Py_SIZE(z
));
3864 z
->ob_digit
[size_z
] = PyLong_MASK
;
3865 v_complement(z
->ob_digit
, z
->ob_digit
, size_z
+1);
3870 return (PyObject
*)long_normalize(z
);
3874 long_and(PyObject
*v
, PyObject
*w
)
3876 PyLongObject
*a
, *b
;
3878 CONVERT_BINOP(v
, w
, &a
, &b
);
3879 c
= long_bitwise(a
, '&', b
);
3886 long_xor(PyObject
*v
, PyObject
*w
)
3888 PyLongObject
*a
, *b
;
3890 CONVERT_BINOP(v
, w
, &a
, &b
);
3891 c
= long_bitwise(a
, '^', b
);
3898 long_or(PyObject
*v
, PyObject
*w
)
3900 PyLongObject
*a
, *b
;
3902 CONVERT_BINOP(v
, w
, &a
, &b
);
3903 c
= long_bitwise(a
, '|', b
);
3910 long_coerce(PyObject
**pv
, PyObject
**pw
)
3912 if (PyInt_Check(*pw
)) {
3913 *pw
= PyLong_FromLong(PyInt_AS_LONG(*pw
));
3919 else if (PyLong_Check(*pw
)) {
3924 return 1; /* Can't do it */
3928 long_long(PyObject
*v
)
3930 if (PyLong_CheckExact(v
))
3933 v
= _PyLong_Copy((PyLongObject
*)v
);
3938 long_int(PyObject
*v
)
3941 x
= PyLong_AsLong(v
);
3942 if (PyErr_Occurred()) {
3943 if (PyErr_ExceptionMatches(PyExc_OverflowError
)) {
3945 if (PyLong_CheckExact(v
)) {
3950 return _PyLong_Copy((PyLongObject
*)v
);
3955 return PyInt_FromLong(x
);
3959 long_float(PyObject
*v
)
3962 result
= PyLong_AsDouble(v
);
3963 if (result
== -1.0 && PyErr_Occurred())
3965 return PyFloat_FromDouble(result
);
3969 long_oct(PyObject
*v
)
3971 return _PyLong_Format(v
, 8, 1, 0);
3975 long_hex(PyObject
*v
)
3977 return _PyLong_Format(v
, 16, 1, 0);
3981 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
);
3984 long_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
3987 int base
= -909; /* unlikely! */
3988 static char *kwlist
[] = {"x", "base", 0};
3990 if (type
!= &PyLong_Type
)
3991 return long_subtype_new(type
, args
, kwds
); /* Wimp out */
3992 if (!PyArg_ParseTupleAndKeywords(args
, kwds
, "|Oi:long", kwlist
,
3996 return PyLong_FromLong(0L);
3998 return PyNumber_Long(x
);
3999 else if (PyString_Check(x
)) {
4000 /* Since PyLong_FromString doesn't have a length parameter,
4001 * check here for possible NULs in the string. */
4002 char *string
= PyString_AS_STRING(x
);
4003 if (strlen(string
) != (size_t)PyString_Size(x
)) {
4004 /* create a repr() of the input string,
4005 * just like PyLong_FromString does. */
4007 srepr
= PyObject_Repr(x
);
4010 PyErr_Format(PyExc_ValueError
,
4011 "invalid literal for long() with base %d: %s",
4012 base
, PyString_AS_STRING(srepr
));
4016 return PyLong_FromString(PyString_AS_STRING(x
), NULL
, base
);
4018 #ifdef Py_USING_UNICODE
4019 else if (PyUnicode_Check(x
))
4020 return PyLong_FromUnicode(PyUnicode_AS_UNICODE(x
),
4021 PyUnicode_GET_SIZE(x
),
4025 PyErr_SetString(PyExc_TypeError
,
4026 "long() can't convert non-string with explicit base");
4031 /* Wimpy, slow approach to tp_new calls for subtypes of long:
4032 first create a regular long from whatever arguments we got,
4033 then allocate a subtype instance and initialize it from
4034 the regular long. The regular long is then thrown away.
4037 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
4039 PyLongObject
*tmp
, *newobj
;
4042 assert(PyType_IsSubtype(type
, &PyLong_Type
));
4043 tmp
= (PyLongObject
*)long_new(&PyLong_Type
, args
, kwds
);
4046 assert(PyLong_CheckExact(tmp
));
4050 newobj
= (PyLongObject
*)type
->tp_alloc(type
, n
);
4051 if (newobj
== NULL
) {
4055 assert(PyLong_Check(newobj
));
4056 Py_SIZE(newobj
) = Py_SIZE(tmp
);
4057 for (i
= 0; i
< n
; i
++)
4058 newobj
->ob_digit
[i
] = tmp
->ob_digit
[i
];
4060 return (PyObject
*)newobj
;
4064 long_getnewargs(PyLongObject
*v
)
4066 return Py_BuildValue("(N)", _PyLong_Copy(v
));
4070 long_get0(PyLongObject
*v
, void *context
) {
4071 return PyLong_FromLong(0L);
4075 long_get1(PyLongObject
*v
, void *context
) {
4076 return PyLong_FromLong(1L);
4080 long__format__(PyObject
*self
, PyObject
*args
)
4082 PyObject
*format_spec
;
4084 if (!PyArg_ParseTuple(args
, "O:__format__", &format_spec
))
4086 if (PyBytes_Check(format_spec
))
4087 return _PyLong_FormatAdvanced(self
,
4088 PyBytes_AS_STRING(format_spec
),
4089 PyBytes_GET_SIZE(format_spec
));
4090 if (PyUnicode_Check(format_spec
)) {
4091 /* Convert format_spec to a str */
4093 PyObject
*str_spec
= PyObject_Str(format_spec
);
4095 if (str_spec
== NULL
)
4098 result
= _PyLong_FormatAdvanced(self
,
4099 PyBytes_AS_STRING(str_spec
),
4100 PyBytes_GET_SIZE(str_spec
));
4102 Py_DECREF(str_spec
);
4105 PyErr_SetString(PyExc_TypeError
, "__format__ requires str or unicode");
4110 long_sizeof(PyLongObject
*v
)
4114 res
= v
->ob_type
->tp_basicsize
+ ABS(Py_SIZE(v
))*sizeof(digit
);
4115 return PyInt_FromSsize_t(res
);
4119 long_bit_length(PyLongObject
*v
)
4121 PyLongObject
*result
, *x
, *y
;
4122 Py_ssize_t ndigits
, msd_bits
= 0;
4126 assert(PyLong_Check(v
));
4128 ndigits
= ABS(Py_SIZE(v
));
4130 return PyInt_FromLong(0);
4132 msd
= v
->ob_digit
[ndigits
-1];
4137 msd_bits
+= (long)(BitLengthTable
[msd
]);
4139 if (ndigits
<= PY_SSIZE_T_MAX
/PyLong_SHIFT
)
4140 return PyInt_FromSsize_t((ndigits
-1)*PyLong_SHIFT
+ msd_bits
);
4142 /* expression above may overflow; use Python integers instead */
4143 result
= (PyLongObject
*)PyLong_FromSsize_t(ndigits
- 1);
4146 x
= (PyLongObject
*)PyLong_FromLong(PyLong_SHIFT
);
4149 y
= (PyLongObject
*)long_mul(result
, x
);
4156 x
= (PyLongObject
*)PyLong_FromLong((long)msd_bits
);
4159 y
= (PyLongObject
*)long_add(result
, x
);
4166 return (PyObject
*)result
;
4173 PyDoc_STRVAR(long_bit_length_doc
,
4174 "long.bit_length() -> int or long\n\
4176 Number of bits necessary to represent self in binary.\n\
4179 >>> (37L).bit_length()\n\
4184 long_is_finite(PyObject
*v
)
4190 static PyMethodDef long_methods
[] = {
4191 {"conjugate", (PyCFunction
)long_long
, METH_NOARGS
,
4192 "Returns self, the complex conjugate of any long."},
4193 {"bit_length", (PyCFunction
)long_bit_length
, METH_NOARGS
,
4194 long_bit_length_doc
},
4196 {"is_finite", (PyCFunction
)long_is_finite
, METH_NOARGS
,
4197 "Returns always True."},
4199 {"__trunc__", (PyCFunction
)long_long
, METH_NOARGS
,
4200 "Truncating an Integral returns itself."},
4201 {"__getnewargs__", (PyCFunction
)long_getnewargs
, METH_NOARGS
},
4202 {"__format__", (PyCFunction
)long__format__
, METH_VARARGS
},
4203 {"__sizeof__", (PyCFunction
)long_sizeof
, METH_NOARGS
,
4204 "Returns size in memory, in bytes"},
4205 {NULL
, NULL
} /* sentinel */
4208 static PyGetSetDef long_getset
[] = {
4210 (getter
)long_long
, (setter
)NULL
,
4211 "the real part of a complex number",
4214 (getter
)long_get0
, (setter
)NULL
,
4215 "the imaginary part of a complex number",
4218 (getter
)long_long
, (setter
)NULL
,
4219 "the numerator of a rational number in lowest terms",
4222 (getter
)long_get1
, (setter
)NULL
,
4223 "the denominator of a rational number in lowest terms",
4225 {NULL
} /* Sentinel */
4228 PyDoc_STRVAR(long_doc
,
4229 "long(x[, base]) -> integer\n\
4231 Convert a string or number to a long integer, if possible. A floating\n\
4232 point argument will be truncated towards zero (this does not include a\n\
4233 string representation of a floating point number!) When converting a\n\
4234 string, use the optional base. It is an error to supply a base when\n\
4235 converting a non-string.");
4237 static PyNumberMethods long_as_number
= {
4238 (binaryfunc
)long_add
, /*nb_add*/
4239 (binaryfunc
)long_sub
, /*nb_subtract*/
4240 (binaryfunc
)long_mul
, /*nb_multiply*/
4241 long_classic_div
, /*nb_divide*/
4242 long_mod
, /*nb_remainder*/
4243 long_divmod
, /*nb_divmod*/
4244 long_pow
, /*nb_power*/
4245 (unaryfunc
)long_neg
, /*nb_negative*/
4246 (unaryfunc
)long_long
, /*tp_positive*/
4247 (unaryfunc
)long_abs
, /*tp_absolute*/
4248 (inquiry
)long_nonzero
, /*tp_nonzero*/
4249 (unaryfunc
)long_invert
, /*nb_invert*/
4250 long_lshift
, /*nb_lshift*/
4251 (binaryfunc
)long_rshift
, /*nb_rshift*/
4252 long_and
, /*nb_and*/
4253 long_xor
, /*nb_xor*/
4255 long_coerce
, /*nb_coerce*/
4256 long_int
, /*nb_int*/
4257 long_long
, /*nb_long*/
4258 long_float
, /*nb_float*/
4259 long_oct
, /*nb_oct*/
4260 long_hex
, /*nb_hex*/
4261 0, /* nb_inplace_add */
4262 0, /* nb_inplace_subtract */
4263 0, /* nb_inplace_multiply */
4264 0, /* nb_inplace_divide */
4265 0, /* nb_inplace_remainder */
4266 0, /* nb_inplace_power */
4267 0, /* nb_inplace_lshift */
4268 0, /* nb_inplace_rshift */
4269 0, /* nb_inplace_and */
4270 0, /* nb_inplace_xor */
4271 0, /* nb_inplace_or */
4272 long_div
, /* nb_floor_divide */
4273 long_true_divide
, /* nb_true_divide */
4274 0, /* nb_inplace_floor_divide */
4275 0, /* nb_inplace_true_divide */
4276 long_long
, /* nb_index */
4279 PyTypeObject PyLong_Type
= {
4280 PyObject_HEAD_INIT(&PyType_Type
)
4282 "long", /* tp_name */
4283 offsetof(PyLongObject
, ob_digit
), /* tp_basicsize */
4284 sizeof(digit
), /* tp_itemsize */
4285 long_dealloc
, /* tp_dealloc */
4289 (cmpfunc
)long_compare
, /* tp_compare */
4290 long_repr
, /* tp_repr */
4291 &long_as_number
, /* tp_as_number */
4292 0, /* tp_as_sequence */
4293 0, /* tp_as_mapping */
4294 (hashfunc
)long_hash
, /* tp_hash */
4296 long_str
, /* tp_str */
4297 PyObject_GenericGetAttr
, /* tp_getattro */
4298 0, /* tp_setattro */
4299 0, /* tp_as_buffer */
4300 Py_TPFLAGS_DEFAULT
| Py_TPFLAGS_CHECKTYPES
|
4301 Py_TPFLAGS_BASETYPE
| Py_TPFLAGS_LONG_SUBCLASS
, /* tp_flags */
4302 long_doc
, /* tp_doc */
4303 0, /* tp_traverse */
4305 0, /* tp_richcompare */
4306 0, /* tp_weaklistoffset */
4308 0, /* tp_iternext */
4309 long_methods
, /* tp_methods */
4311 long_getset
, /* tp_getset */
4314 0, /* tp_descr_get */
4315 0, /* tp_descr_set */
4316 0, /* tp_dictoffset */
4319 long_new
, /* tp_new */
4320 PyObject_Del
, /* tp_free */
/* Type of the sys.long_info struct sequence.  Left zero-initialized here and
   filled in later by PyStructSequence_InitType (guarded by a tp_name == 0
   check at init time). */
static PyTypeObject Long_InfoType;
4325 PyDoc_STRVAR(long_info__doc__
,
4328 A struct sequence that holds information about Python's\n\
4329 internal representation of integers. The attributes are read only.");
4331 static PyStructSequence_Field long_info_fields
[] = {
4332 {"bits_per_digit", "size of a digit in bits"},
4333 {"sizeof_digit", "size in bytes of the C type used to represent a digit"},
4337 static PyStructSequence_Desc long_info_desc
= {
4338 "sys.long_info", /* name */
4339 long_info__doc__
, /* doc */
4340 long_info_fields
, /* fields */
4341 2 /* number of fields */
4345 PyLong_GetInfo(void)
4347 PyObject
* long_info
;
4349 long_info
= PyStructSequence_New(&Long_InfoType
);
4350 if (long_info
== NULL
)
4352 PyStructSequence_SET_ITEM(long_info
, field
++,
4353 PyInt_FromLong(PyLong_SHIFT
));
4354 PyStructSequence_SET_ITEM(long_info
, field
++,
4355 PyInt_FromLong(sizeof(digit
)));
4356 if (PyErr_Occurred()) {
4357 Py_CLEAR(long_info
);
4366 /* initialize long_info */
4367 if (Long_InfoType
.tp_name
== 0)
4368 PyStructSequence_InitType(&Long_InfoType
, &long_info_desc
);