2 Long (arbitrary precision) integer object implementation.
4 Copyright (c) 2015, Daryl McDaniel. All rights reserved.<BR>
5 This program and the accompanying materials are licensed and made available under
6 the terms and conditions of the BSD License that accompanies this distribution.
7 The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14 /* XXX The functional organization of this file is terrible */
17 #include "longintrepr.h"
18 #include "structseq.h"
24 /* For long multiplication, use the O(N**2) school algorithm unless
25 * both operands contain more than KARATSUBA_CUTOFF digits (this
26 * being an internal Python long digit, in base PyLong_BASE).
28 #define KARATSUBA_CUTOFF 70
29 #define KARATSUBA_SQUARE_CUTOFF (2 * KARATSUBA_CUTOFF)
31 /* For exponentiation, use the binary left-to-right algorithm
32 * unless the exponent contains more than FIVEARY_CUTOFF digits.
33 * In that case, do 5 bits at a time. The potential drawback is that
34 * a table of 2**5 intermediate results is computed.
36 #define FIVEARY_CUTOFF 8
39 #define ABS(x) ((x) < 0 ? -(x) : (x))
43 #define MAX(x, y) ((x) < (y) ? (y) : (x))
44 #define MIN(x, y) ((x) > (y) ? (y) : (x))
46 #define SIGCHECK(PyTryBlock) \
48 if (--_Py_Ticker < 0) { \
49 _Py_Ticker = _Py_CheckInterval; \
50 if (PyErr_CheckSignals()) PyTryBlock \
/* Normalize (remove leading zeros from) a long int object.
   Doesn't attempt to free the storage--in most cases, due to the nature
   of the algorithms used, this could save at most one word anyway. */
59 long_normalize(register PyLongObject
*v
)
61 Py_ssize_t j
= ABS(Py_SIZE(v
));
64 while (i
> 0 && v
->ob_digit
[i
-1] == 0)
67 Py_SIZE(v
) = (Py_SIZE(v
) < 0) ? -(i
) : i
;
71 /* Allocate a new long int object with size digits.
72 Return NULL and set exception if we run out of memory. */
74 #define MAX_LONG_DIGITS \
75 ((PY_SSIZE_T_MAX - offsetof(PyLongObject, ob_digit))/sizeof(digit))
78 _PyLong_New(Py_ssize_t size
)
80 if (size
> (Py_ssize_t
)MAX_LONG_DIGITS
) {
81 PyErr_SetString(PyExc_OverflowError
,
82 "too many digits in integer");
85 /* coverity[ampersand_in_size] */
86 /* XXX(nnorwitz): PyObject_NEW_VAR / _PyObject_VAR_SIZE need to detect
88 return PyObject_NEW_VAR(PyLongObject
, &PyLong_Type
, size
);
92 _PyLong_Copy(PyLongObject
*src
)
101 result
= _PyLong_New(i
);
102 if (result
!= NULL
) {
103 result
->ob_size
= src
->ob_size
;
105 result
->ob_digit
[i
] = src
->ob_digit
[i
];
107 return (PyObject
*)result
;
110 /* Create a new long int object from a C long int */
113 PyLong_FromLong(long ival
)
116 unsigned long abs_ival
;
117 unsigned long t
; /* unsigned so >> doesn't propagate sign bit */
122 /* if LONG_MIN == -LONG_MAX-1 (true on most platforms) then
123 ANSI C says that the result of -ival is undefined when ival
124 == LONG_MIN. Hence the following workaround. */
125 abs_ival
= (unsigned long)(-1-ival
) + 1;
129 abs_ival
= (unsigned long)ival
;
132 /* Count the number of Python digits.
133 We used to pick 5 ("big enough for anything"), but that's a
134 waste of time and space given that 5*15 = 75 bits are rarely
141 v
= _PyLong_New(ndigits
);
143 digit
*p
= v
->ob_digit
;
144 v
->ob_size
= negative
? -ndigits
: ndigits
;
147 *p
++ = (digit
)(t
& PyLong_MASK
);
151 return (PyObject
*)v
;
154 /* Create a new long int object from a C unsigned long int */
157 PyLong_FromUnsignedLong(unsigned long ival
)
163 /* Count the number of Python digits. */
164 t
= (unsigned long)ival
;
169 v
= _PyLong_New(ndigits
);
171 digit
*p
= v
->ob_digit
;
172 Py_SIZE(v
) = ndigits
;
174 *p
++ = (digit
)(ival
& PyLong_MASK
);
175 ival
>>= PyLong_SHIFT
;
178 return (PyObject
*)v
;
181 /* Create a new long int object from a C double */
184 PyLong_FromDouble(double dval
)
188 int i
, ndig
, expo
, neg
;
190 if (Py_IS_INFINITY(dval
)) {
191 PyErr_SetString(PyExc_OverflowError
,
192 "cannot convert float infinity to integer");
195 if (Py_IS_NAN(dval
)) {
196 PyErr_SetString(PyExc_ValueError
,
197 "cannot convert float NaN to integer");
204 frac
= frexp(dval
, &expo
); /* dval = frac*2**expo; 0.0 <= frac < 1.0 */
206 return PyLong_FromLong(0L);
207 ndig
= (expo
-1) / PyLong_SHIFT
+ 1; /* Number of 'digits' in result */
208 v
= _PyLong_New(ndig
);
211 frac
= ldexp(frac
, (expo
-1) % PyLong_SHIFT
+ 1);
212 for (i
= ndig
; --i
>= 0; ) {
213 digit bits
= (digit
)frac
;
214 v
->ob_digit
[i
] = bits
;
215 frac
= frac
- (double)bits
;
216 frac
= ldexp(frac
, PyLong_SHIFT
);
219 Py_SIZE(v
) = -(Py_SIZE(v
));
220 return (PyObject
*)v
;
223 /* Checking for overflow in PyLong_AsLong is a PITA since C doesn't define
224 * anything about what happens when a signed integer operation overflows,
225 * and some compilers think they're doing you a favor by being "clever"
226 * then. The bit pattern for the largest postive signed long is
227 * (unsigned long)LONG_MAX, and for the smallest negative signed long
228 * it is abs(LONG_MIN), which we could write -(unsigned long)LONG_MIN.
229 * However, some other compilers warn about applying unary minus to an
230 * unsigned operand. Hence the weird "0-".
232 #define PY_ABS_LONG_MIN (0-(unsigned long)LONG_MIN)
233 #define PY_ABS_SSIZE_T_MIN (0-(size_t)PY_SSIZE_T_MIN)
235 /* Get a C long int from a Python long or Python int object.
236 On overflow, returns -1 and sets *overflow to 1 or -1 depending
237 on the sign of the result. Otherwise *overflow is 0.
239 For other errors (e.g., type error), returns -1 and sets an error
244 PyLong_AsLongAndOverflow(PyObject
*vv
, int *overflow
)
246 /* This version by Tim Peters */
247 register PyLongObject
*v
;
248 unsigned long x
, prev
;
252 int do_decref
= 0; /* if nb_int was called */
256 PyErr_BadInternalCall();
261 return PyInt_AsLong(vv
);
263 if (!PyLong_Check(vv
)) {
265 nb
= vv
->ob_type
->tp_as_number
;
266 if (nb
== NULL
|| nb
->nb_int
== NULL
) {
267 PyErr_SetString(PyExc_TypeError
,
268 "an integer is required");
271 vv
= (*nb
->nb_int
) (vv
);
275 if(PyInt_Check(vv
)) {
276 res
= PyInt_AsLong(vv
);
279 if (!PyLong_Check(vv
)) {
281 PyErr_SetString(PyExc_TypeError
,
282 "nb_int should return int object");
288 v
= (PyLongObject
*)vv
;
293 res
= -(sdigit
)v
->ob_digit
[0];
299 res
= v
->ob_digit
[0];
310 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
311 if ((x
>> PyLong_SHIFT
) != prev
) {
316 /* Haven't lost any bits, but casting to long requires extra
317 * care (see comment above).
319 if (x
<= (unsigned long)LONG_MAX
) {
320 res
= (long)x
* sign
;
322 else if (sign
< 0 && x
== PY_ABS_LONG_MIN
) {
327 /* res is already set to -1 */
337 /* Get a C long int from a long int object.
338 Returns -1 and sets an error condition if overflow occurs. */
341 PyLong_AsLong(PyObject
*obj
)
344 long result
= PyLong_AsLongAndOverflow(obj
, &overflow
);
346 /* XXX: could be cute and give a different
347 message for overflow == -1 */
348 PyErr_SetString(PyExc_OverflowError
,
349 "Python int too large to convert to C long");
354 /* Get a C int from a long int object or any object that has an __int__
355 method. Return -1 and set an error if overflow occurs. */
358 _PyLong_AsInt(PyObject
*obj
)
361 long result
= PyLong_AsLongAndOverflow(obj
, &overflow
);
362 if (overflow
|| result
> INT_MAX
|| result
< INT_MIN
) {
363 /* XXX: could be cute and give a different
364 message for overflow == -1 */
365 PyErr_SetString(PyExc_OverflowError
,
366 "Python int too large to convert to C int");
372 /* Get a Py_ssize_t from a long int object.
373 Returns -1 and sets an error condition if overflow occurs. */
376 PyLong_AsSsize_t(PyObject
*vv
) {
377 register PyLongObject
*v
;
382 if (vv
== NULL
|| !PyLong_Check(vv
)) {
383 PyErr_BadInternalCall();
386 v
= (PyLongObject
*)vv
;
396 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
397 if ((x
>> PyLong_SHIFT
) != prev
)
400 /* Haven't lost any bits, but casting to a signed type requires
401 * extra care (see comment above).
403 if (x
<= (size_t)PY_SSIZE_T_MAX
) {
404 return (Py_ssize_t
)x
* sign
;
406 else if (sign
< 0 && x
== PY_ABS_SSIZE_T_MIN
) {
407 return PY_SSIZE_T_MIN
;
412 PyErr_SetString(PyExc_OverflowError
,
413 "long int too large to convert to int");
417 /* Get a C unsigned long int from a long int object.
418 Returns -1 and sets an error condition if overflow occurs. */
421 PyLong_AsUnsignedLong(PyObject
*vv
)
423 register PyLongObject
*v
;
424 unsigned long x
, prev
;
427 if (vv
== NULL
|| !PyLong_Check(vv
)) {
428 if (vv
!= NULL
&& PyInt_Check(vv
)) {
429 long val
= PyInt_AsLong(vv
);
431 PyErr_SetString(PyExc_OverflowError
,
432 "can't convert negative value "
434 return (unsigned long) -1;
438 PyErr_BadInternalCall();
439 return (unsigned long) -1;
441 v
= (PyLongObject
*)vv
;
445 PyErr_SetString(PyExc_OverflowError
,
446 "can't convert negative value to unsigned long");
447 return (unsigned long) -1;
451 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
452 if ((x
>> PyLong_SHIFT
) != prev
) {
453 PyErr_SetString(PyExc_OverflowError
,
454 "long int too large to convert");
455 return (unsigned long) -1;
461 /* Get a C unsigned long int from a long int object, ignoring the high bits.
462 Returns -1 and sets an error condition if an error occurs. */
465 PyLong_AsUnsignedLongMask(PyObject
*vv
)
467 register PyLongObject
*v
;
472 if (vv
== NULL
|| !PyLong_Check(vv
)) {
473 if (vv
!= NULL
&& PyInt_Check(vv
))
474 return PyInt_AsUnsignedLongMask(vv
);
475 PyErr_BadInternalCall();
476 return (unsigned long) -1;
478 v
= (PyLongObject
*)vv
;
487 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
493 _PyLong_Sign(PyObject
*vv
)
495 PyLongObject
*v
= (PyLongObject
*)vv
;
498 assert(PyLong_Check(v
));
500 return Py_SIZE(v
) == 0 ? 0 : (Py_SIZE(v
) < 0 ? -1 : 1);
504 _PyLong_NumBits(PyObject
*vv
)
506 PyLongObject
*v
= (PyLongObject
*)vv
;
511 assert(PyLong_Check(v
));
512 ndigits
= ABS(Py_SIZE(v
));
513 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
515 digit msd
= v
->ob_digit
[ndigits
- 1];
517 result
= (ndigits
- 1) * PyLong_SHIFT
;
518 if (result
/ PyLong_SHIFT
!= (size_t)(ndigits
- 1))
530 PyErr_SetString(PyExc_OverflowError
, "long has too many bits "
531 "to express in a platform size_t");
536 _PyLong_FromByteArray(const unsigned char* bytes
, size_t n
,
537 int little_endian
, int is_signed
)
539 const unsigned char* pstartbyte
; /* LSB of bytes */
540 int incr
; /* direction to move pstartbyte */
541 const unsigned char* pendbyte
; /* MSB of bytes */
542 size_t numsignificantbytes
; /* number of bytes that matter */
543 Py_ssize_t ndigits
; /* number of Python long digits */
544 PyLongObject
* v
; /* result */
545 Py_ssize_t idigit
= 0; /* next free index in v->ob_digit */
548 return PyLong_FromLong(0L);
552 pendbyte
= bytes
+ n
- 1;
556 pstartbyte
= bytes
+ n
- 1;
562 is_signed
= *pendbyte
>= 0x80;
564 /* Compute numsignificantbytes. This consists of finding the most
565 significant byte. Leading 0 bytes are insignificant if the number
566 is positive, and leading 0xff bytes if negative. */
569 const unsigned char* p
= pendbyte
;
570 const int pincr
= -incr
; /* search MSB to LSB */
571 const unsigned char insignficant
= is_signed
? 0xff : 0x00;
573 for (i
= 0; i
< n
; ++i
, p
+= pincr
) {
574 if (*p
!= insignficant
)
577 numsignificantbytes
= n
- i
;
578 /* 2's-comp is a bit tricky here, e.g. 0xff00 == -0x0100, so
579 actually has 2 significant bytes. OTOH, 0xff0001 ==
580 -0x00ffff, so we wouldn't *need* to bump it there; but we
581 do for 0xffff = -0x0001. To be safe without bothering to
582 check every case, bump it regardless. */
583 if (is_signed
&& numsignificantbytes
< n
)
584 ++numsignificantbytes
;
587 /* How many Python long digits do we need? We have
588 8*numsignificantbytes bits, and each Python long digit has
589 PyLong_SHIFT bits, so it's the ceiling of the quotient. */
590 /* catch overflow before it happens */
591 if (numsignificantbytes
> (PY_SSIZE_T_MAX
- PyLong_SHIFT
) / 8) {
592 PyErr_SetString(PyExc_OverflowError
,
593 "byte array too long to convert to int");
596 ndigits
= (numsignificantbytes
* 8 + PyLong_SHIFT
- 1) / PyLong_SHIFT
;
597 v
= _PyLong_New(ndigits
);
601 /* Copy the bits over. The tricky parts are computing 2's-comp on
602 the fly for signed numbers, and dealing with the mismatch between
603 8-bit bytes and (probably) 15-bit Python digits.*/
606 twodigits carry
= 1; /* for 2's-comp calculation */
607 twodigits accum
= 0; /* sliding register */
608 unsigned int accumbits
= 0; /* number of bits in accum */
609 const unsigned char* p
= pstartbyte
;
611 for (i
= 0; i
< numsignificantbytes
; ++i
, p
+= incr
) {
612 twodigits thisbyte
= *p
;
613 /* Compute correction for 2's comp, if needed. */
615 thisbyte
= (0xff ^ thisbyte
) + carry
;
616 carry
= thisbyte
>> 8;
619 /* Because we're going LSB to MSB, thisbyte is
620 more significant than what's already in accum,
621 so needs to be prepended to accum. */
622 accum
|= (twodigits
)thisbyte
<< accumbits
;
624 if (accumbits
>= PyLong_SHIFT
) {
625 /* There's enough to fill a Python digit. */
626 assert(idigit
< ndigits
);
627 v
->ob_digit
[idigit
] = (digit
)(accum
& PyLong_MASK
);
629 accum
>>= PyLong_SHIFT
;
630 accumbits
-= PyLong_SHIFT
;
631 assert(accumbits
< PyLong_SHIFT
);
634 assert(accumbits
< PyLong_SHIFT
);
636 assert(idigit
< ndigits
);
637 v
->ob_digit
[idigit
] = (digit
)accum
;
642 Py_SIZE(v
) = is_signed
? -idigit
: idigit
;
643 return (PyObject
*)long_normalize(v
);
647 _PyLong_AsByteArray(PyLongObject
* v
,
648 unsigned char* bytes
, size_t n
,
649 int little_endian
, int is_signed
)
651 Py_ssize_t i
; /* index into v->ob_digit */
652 Py_ssize_t ndigits
; /* |v->ob_size| */
653 twodigits accum
; /* sliding register */
654 unsigned int accumbits
; /* # bits in accum */
655 int do_twos_comp
; /* store 2's-comp? is_signed and v < 0 */
656 digit carry
; /* for computing 2's-comp */
657 size_t j
; /* # bytes filled */
658 unsigned char* p
; /* pointer to next byte in bytes */
659 int pincr
; /* direction to move p */
661 assert(v
!= NULL
&& PyLong_Check(v
));
663 if (Py_SIZE(v
) < 0) {
664 ndigits
= -(Py_SIZE(v
));
666 PyErr_SetString(PyExc_OverflowError
,
667 "can't convert negative long to unsigned");
673 ndigits
= Py_SIZE(v
);
686 /* Copy over all the Python digits.
687 It's crucial that every Python digit except for the MSD contribute
688 exactly PyLong_SHIFT bits to the total, so first assert that the long is
690 assert(ndigits
== 0 || v
->ob_digit
[ndigits
- 1] != 0);
694 carry
= do_twos_comp
? 1 : 0;
695 for (i
= 0; i
< ndigits
; ++i
) {
696 digit thisdigit
= v
->ob_digit
[i
];
698 thisdigit
= (thisdigit
^ PyLong_MASK
) + carry
;
699 carry
= thisdigit
>> PyLong_SHIFT
;
700 thisdigit
&= PyLong_MASK
;
702 /* Because we're going LSB to MSB, thisdigit is more
703 significant than what's already in accum, so needs to be
704 prepended to accum. */
705 accum
|= (twodigits
)thisdigit
<< accumbits
;
707 /* The most-significant digit may be (probably is) at least
709 if (i
== ndigits
- 1) {
710 /* Count # of sign bits -- they needn't be stored,
711 * although for signed conversion we need later to
712 * make sure at least one sign bit gets stored. */
713 digit s
= do_twos_comp
? thisdigit
^ PyLong_MASK
: thisdigit
;
720 accumbits
+= PyLong_SHIFT
;
722 /* Store as many bytes as possible. */
723 while (accumbits
>= 8) {
727 *p
= (unsigned char)(accum
& 0xff);
734 /* Store the straggler (if any). */
735 assert(accumbits
< 8);
736 assert(carry
== 0); /* else do_twos_comp and *every* digit was 0 */
742 /* Fill leading bits of the byte with sign bits
743 (appropriately pretending that the long had an
744 infinite supply of sign bits). */
745 accum
|= (~(twodigits
)0) << accumbits
;
747 *p
= (unsigned char)(accum
& 0xff);
750 else if (j
== n
&& n
> 0 && is_signed
) {
751 /* The main loop filled the byte array exactly, so the code
752 just above didn't get to ensure there's a sign bit, and the
753 loop below wouldn't add one either. Make sure a sign bit
755 unsigned char msb
= *(p
- pincr
);
756 int sign_bit_set
= msb
>= 0x80;
757 assert(accumbits
== 0);
758 if (sign_bit_set
== do_twos_comp
)
764 /* Fill remaining bytes with copies of the sign bit. */
766 unsigned char signbyte
= do_twos_comp
? 0xffU
: 0U;
767 for ( ; j
< n
; ++j
, p
+= pincr
)
774 PyErr_SetString(PyExc_OverflowError
, "long too big to convert");
779 /* Create a new long (or int) object from a C pointer */
782 PyLong_FromVoidPtr(void *p
)
784 #if SIZEOF_VOID_P <= SIZEOF_LONG
786 return PyLong_FromUnsignedLong((unsigned long)p
);
787 return PyInt_FromLong((long)p
);
790 #ifndef HAVE_LONG_LONG
791 # error "PyLong_FromVoidPtr: sizeof(void*) > sizeof(long), but no long long"
793 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
794 # error "PyLong_FromVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
796 /* optimize null pointers */
798 return PyInt_FromLong(0);
799 return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG
)p
);
801 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
804 /* Get a C pointer from a long object (or an int object in some cases) */
807 PyLong_AsVoidPtr(PyObject
*vv
)
809 /* This function will allow int or long objects. If vv is neither,
810 then the PyLong_AsLong*() functions will raise the exception:
811 PyExc_SystemError, "bad argument to internal function"
813 #if SIZEOF_VOID_P <= SIZEOF_LONG
817 x
= PyInt_AS_LONG(vv
);
818 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
819 x
= PyLong_AsLong(vv
);
821 x
= PyLong_AsUnsignedLong(vv
);
824 #ifndef HAVE_LONG_LONG
825 # error "PyLong_AsVoidPtr: sizeof(void*) > sizeof(long), but no long long"
827 #if SIZEOF_LONG_LONG < SIZEOF_VOID_P
828 # error "PyLong_AsVoidPtr: sizeof(PY_LONG_LONG) < sizeof(void*)"
833 x
= PyInt_AS_LONG(vv
);
834 else if (PyLong_Check(vv
) && _PyLong_Sign(vv
) < 0)
835 x
= PyLong_AsLongLong(vv
);
837 x
= PyLong_AsUnsignedLongLong(vv
);
839 #endif /* SIZEOF_VOID_P <= SIZEOF_LONG */
841 if (x
== -1 && PyErr_Occurred())
846 #ifdef HAVE_LONG_LONG
848 /* Initial PY_LONG_LONG support by Chris Herborth (chrish@qnx.com), later
849 * rewritten to use the newer PyLong_{As,From}ByteArray API.
852 #define IS_LITTLE_ENDIAN (int)*(unsigned char*)&one
853 #define PY_ABS_LLONG_MIN (0-(unsigned PY_LONG_LONG)PY_LLONG_MIN)
855 /* Create a new long int object from a C PY_LONG_LONG int. */
858 PyLong_FromLongLong(PY_LONG_LONG ival
)
861 unsigned PY_LONG_LONG abs_ival
;
862 unsigned PY_LONG_LONG t
; /* unsigned so >> doesn't propagate sign bit */
867 /* avoid signed overflow on negation; see comments
868 in PyLong_FromLong above. */
869 abs_ival
= (unsigned PY_LONG_LONG
)(-1-ival
) + 1;
873 abs_ival
= (unsigned PY_LONG_LONG
)ival
;
876 /* Count the number of Python digits.
877 We used to pick 5 ("big enough for anything"), but that's a
878 waste of time and space given that 5*15 = 75 bits are rarely
885 v
= _PyLong_New(ndigits
);
887 digit
*p
= v
->ob_digit
;
888 Py_SIZE(v
) = negative
? -ndigits
: ndigits
;
891 *p
++ = (digit
)(t
& PyLong_MASK
);
895 return (PyObject
*)v
;
898 /* Create a new long int object from a C unsigned PY_LONG_LONG int. */
901 PyLong_FromUnsignedLongLong(unsigned PY_LONG_LONG ival
)
904 unsigned PY_LONG_LONG t
;
907 /* Count the number of Python digits. */
908 t
= (unsigned PY_LONG_LONG
)ival
;
913 v
= _PyLong_New(ndigits
);
915 digit
*p
= v
->ob_digit
;
916 Py_SIZE(v
) = ndigits
;
918 *p
++ = (digit
)(ival
& PyLong_MASK
);
919 ival
>>= PyLong_SHIFT
;
922 return (PyObject
*)v
;
925 /* Create a new long int object from a C Py_ssize_t. */
928 PyLong_FromSsize_t(Py_ssize_t ival
)
930 Py_ssize_t bytes
= ival
;
932 return _PyLong_FromByteArray((unsigned char *)&bytes
,
933 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 1);
936 /* Create a new long int object from a C size_t. */
939 PyLong_FromSize_t(size_t ival
)
943 return _PyLong_FromByteArray((unsigned char *)&bytes
,
944 SIZEOF_SIZE_T
, IS_LITTLE_ENDIAN
, 0);
947 /* Get a C PY_LONG_LONG int from a long int object.
948 Return -1 and set an error if overflow occurs. */
951 PyLong_AsLongLong(PyObject
*vv
)
958 PyErr_BadInternalCall();
961 if (!PyLong_Check(vv
)) {
965 return (PY_LONG_LONG
)PyInt_AsLong(vv
);
966 if ((nb
= vv
->ob_type
->tp_as_number
) == NULL
||
967 nb
->nb_int
== NULL
) {
968 PyErr_SetString(PyExc_TypeError
, "an integer is required");
971 io
= (*nb
->nb_int
) (vv
);
974 if (PyInt_Check(io
)) {
975 bytes
= PyInt_AsLong(io
);
979 if (PyLong_Check(io
)) {
980 bytes
= PyLong_AsLongLong(io
);
985 PyErr_SetString(PyExc_TypeError
, "integer conversion failed");
989 res
= _PyLong_AsByteArray((PyLongObject
*)vv
, (unsigned char *)&bytes
,
990 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 1);
992 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
994 return (PY_LONG_LONG
)-1;
999 /* Get a C unsigned PY_LONG_LONG int from a long int object.
1000 Return -1 and set an error if overflow occurs. */
1002 unsigned PY_LONG_LONG
1003 PyLong_AsUnsignedLongLong(PyObject
*vv
)
1005 unsigned PY_LONG_LONG bytes
;
1009 if (vv
== NULL
|| !PyLong_Check(vv
)) {
1010 PyErr_BadInternalCall();
1011 return (unsigned PY_LONG_LONG
)-1;
1014 res
= _PyLong_AsByteArray((PyLongObject
*)vv
, (unsigned char *)&bytes
,
1015 SIZEOF_LONG_LONG
, IS_LITTLE_ENDIAN
, 0);
1017 /* Plan 9 can't handle PY_LONG_LONG in ? : expressions */
1019 return (unsigned PY_LONG_LONG
)res
;
1024 /* Get a C unsigned long int from a long int object, ignoring the high bits.
1025 Returns -1 and sets an error condition if an error occurs. */
1027 unsigned PY_LONG_LONG
1028 PyLong_AsUnsignedLongLongMask(PyObject
*vv
)
1030 register PyLongObject
*v
;
1031 unsigned PY_LONG_LONG x
;
1035 if (vv
== NULL
|| !PyLong_Check(vv
)) {
1036 PyErr_BadInternalCall();
1037 return (unsigned long) -1;
1039 v
= (PyLongObject
*)vv
;
1048 x
= (x
<< PyLong_SHIFT
) | v
->ob_digit
[i
];
1053 /* Get a C long long int from a Python long or Python int object.
1054 On overflow, returns -1 and sets *overflow to 1 or -1 depending
1055 on the sign of the result. Otherwise *overflow is 0.
1057 For other errors (e.g., type error), returns -1 and sets an error
1062 PyLong_AsLongLongAndOverflow(PyObject
*vv
, int *overflow
)
1064 /* This version by Tim Peters */
1065 register PyLongObject
*v
;
1066 unsigned PY_LONG_LONG x
, prev
;
1070 int do_decref
= 0; /* if nb_int was called */
1074 PyErr_BadInternalCall();
1078 if (PyInt_Check(vv
))
1079 return PyInt_AsLong(vv
);
1081 if (!PyLong_Check(vv
)) {
1082 PyNumberMethods
*nb
;
1083 nb
= vv
->ob_type
->tp_as_number
;
1084 if (nb
== NULL
|| nb
->nb_int
== NULL
) {
1085 PyErr_SetString(PyExc_TypeError
,
1086 "an integer is required");
1089 vv
= (*nb
->nb_int
) (vv
);
1093 if(PyInt_Check(vv
)) {
1094 res
= PyInt_AsLong(vv
);
1097 if (!PyLong_Check(vv
)) {
1099 PyErr_SetString(PyExc_TypeError
,
1100 "nb_int should return int object");
1106 v
= (PyLongObject
*)vv
;
1111 res
= -(sdigit
)v
->ob_digit
[0];
1117 res
= v
->ob_digit
[0];
1128 x
= (x
<< PyLong_SHIFT
) + v
->ob_digit
[i
];
1129 if ((x
>> PyLong_SHIFT
) != prev
) {
1134 /* Haven't lost any bits, but casting to long requires extra
1135 * care (see comment above).
1137 if (x
<= (unsigned PY_LONG_LONG
)PY_LLONG_MAX
) {
1138 res
= (PY_LONG_LONG
)x
* sign
;
1140 else if (sign
< 0 && x
== PY_ABS_LLONG_MIN
) {
1145 /* res is already set to -1 */
1155 #undef IS_LITTLE_ENDIAN
1157 #endif /* HAVE_LONG_LONG */
1161 convert_binop(PyObject
*v
, PyObject
*w
, PyLongObject
**a
, PyLongObject
**b
) {
1162 if (PyLong_Check(v
)) {
1163 *a
= (PyLongObject
*) v
;
1166 else if (PyInt_Check(v
)) {
1167 *a
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(v
));
1172 if (PyLong_Check(w
)) {
1173 *b
= (PyLongObject
*) w
;
1176 else if (PyInt_Check(w
)) {
1177 *b
= (PyLongObject
*) PyLong_FromLong(PyInt_AS_LONG(w
));
1186 #define CONVERT_BINOP(v, w, a, b) \
1188 if (!convert_binop(v, w, a, b)) { \
1189 Py_INCREF(Py_NotImplemented); \
1190 return Py_NotImplemented; \
1194 /* bits_in_digit(d) returns the unique integer k such that 2**(k-1) <= d <
1195 2**k if d is nonzero, else 0. */
1197 static const unsigned char BitLengthTable
[32] = {
1198 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
1199 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
1203 bits_in_digit(digit d
)
1210 d_bits
+= (int)BitLengthTable
[d
];
1214 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1215 * is modified in place, by adding y to it. Carries are propagated as far as
1216 * x[m-1], and the remaining carry (0 or 1) is returned.
1219 v_iadd(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1225 for (i
= 0; i
< n
; ++i
) {
1226 carry
+= x
[i
] + y
[i
];
1227 x
[i
] = carry
& PyLong_MASK
;
1228 carry
>>= PyLong_SHIFT
;
1229 assert((carry
& 1) == carry
);
1231 for (; carry
&& i
< m
; ++i
) {
1233 x
[i
] = carry
& PyLong_MASK
;
1234 carry
>>= PyLong_SHIFT
;
1235 assert((carry
& 1) == carry
);
1240 /* x[0:m] and y[0:n] are digit vectors, LSD first, m >= n required. x[0:n]
1241 * is modified in place, by subtracting y from it. Borrows are propagated as
1242 * far as x[m-1], and the remaining borrow (0 or 1) is returned.
1245 v_isub(digit
*x
, Py_ssize_t m
, digit
*y
, Py_ssize_t n
)
1251 for (i
= 0; i
< n
; ++i
) {
1252 borrow
= x
[i
] - y
[i
] - borrow
;
1253 x
[i
] = borrow
& PyLong_MASK
;
1254 borrow
>>= PyLong_SHIFT
;
1255 borrow
&= 1; /* keep only 1 sign bit */
1257 for (; borrow
&& i
< m
; ++i
) {
1258 borrow
= x
[i
] - borrow
;
1259 x
[i
] = borrow
& PyLong_MASK
;
1260 borrow
>>= PyLong_SHIFT
;
1266 /* Shift digit vector a[0:m] d bits left, with 0 <= d < PyLong_SHIFT. Put
1267 * result in z[0:m], and return the d bits shifted out of the top.
1270 v_lshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1275 assert(0 <= d
&& d
< PyLong_SHIFT
);
1276 for (i
=0; i
< m
; i
++) {
1277 twodigits acc
= (twodigits
)a
[i
] << d
| carry
;
1278 z
[i
] = (digit
)acc
& PyLong_MASK
;
1279 carry
= (digit
)(acc
>> PyLong_SHIFT
);
1284 /* Shift digit vector a[0:m] d bits right, with 0 <= d < PyLong_SHIFT. Put
1285 * result in z[0:m], and return the d bits shifted out of the bottom.
1288 v_rshift(digit
*z
, digit
*a
, Py_ssize_t m
, int d
)
1292 digit mask
= ((digit
)1 << d
) - 1U;
1294 assert(0 <= d
&& d
< PyLong_SHIFT
);
1295 for (i
=m
; i
-- > 0;) {
1296 twodigits acc
= (twodigits
)carry
<< PyLong_SHIFT
| a
[i
];
1297 carry
= (digit
)acc
& mask
;
1298 z
[i
] = (digit
)(acc
>> d
);
1303 /* Divide long pin, w/ size digits, by non-zero digit n, storing quotient
1304 in pout, and returning the remainder. pin and pout point at the LSD.
1305 It's OK for pin == pout on entry, which saves oodles of mallocs/frees in
1306 _PyLong_Format, but that should be done with great care since longs are
1310 inplace_divrem1(digit
*pout
, digit
*pin
, Py_ssize_t size
, digit n
)
1314 assert(n
> 0 && n
<= PyLong_MASK
);
1317 while (--size
>= 0) {
1319 rem
= (rem
<< PyLong_SHIFT
) | *--pin
;
1320 *--pout
= hi
= (digit
)(rem
/ n
);
1321 rem
-= (twodigits
)hi
* n
;
1326 /* Divide a long integer by a digit, returning both the quotient
1327 (as function result) and the remainder (through *prem).
1328 The sign of a is ignored; n should not be zero. */
1330 static PyLongObject
*
1331 divrem1(PyLongObject
*a
, digit n
, digit
*prem
)
1333 const Py_ssize_t size
= ABS(Py_SIZE(a
));
1336 assert(n
> 0 && n
<= PyLong_MASK
);
1337 z
= _PyLong_New(size
);
1340 *prem
= inplace_divrem1(z
->ob_digit
, a
->ob_digit
, size
, n
);
1341 return long_normalize(z
);
1344 /* Convert a long integer to a base 10 string. Returns a new non-shared
1345 string. (Return value is non-shared so that callers can modify the
1346 returned value if necessary.) */
1349 long_to_decimal_string(PyObject
*aa
, int addL
)
1351 PyLongObject
*scratch
, *a
;
1353 Py_ssize_t size
, strlen
, size_a
, i
, j
;
1354 digit
*pout
, *pin
, rem
, tenpow
;
1358 a
= (PyLongObject
*)aa
;
1359 if (a
== NULL
|| !PyLong_Check(a
)) {
1360 PyErr_BadInternalCall();
1363 size_a
= ABS(Py_SIZE(a
));
1364 negative
= Py_SIZE(a
) < 0;
1366 /* quick and dirty upper bound for the number of digits
1367 required to express a in base _PyLong_DECIMAL_BASE:
1369 #digits = 1 + floor(log2(a) / log2(_PyLong_DECIMAL_BASE))
1371 But log2(a) < size_a * PyLong_SHIFT, and
1372 log2(_PyLong_DECIMAL_BASE) = log2(10) * _PyLong_DECIMAL_SHIFT
1373 > 3 * _PyLong_DECIMAL_SHIFT
1375 if (size_a
> PY_SSIZE_T_MAX
/ PyLong_SHIFT
) {
1376 PyErr_SetString(PyExc_OverflowError
,
1377 "long is too large to format");
1380 /* the expression size_a * PyLong_SHIFT is now safe from overflow */
1381 size
= 1 + size_a
* PyLong_SHIFT
/ (3 * _PyLong_DECIMAL_SHIFT
);
1382 scratch
= _PyLong_New(size
);
1383 if (scratch
== NULL
)
1386 /* convert array of base _PyLong_BASE digits in pin to an array of
1387 base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP,
1388 Volume 2 (3rd edn), section 4.4, Method 1b). */
1390 pout
= scratch
->ob_digit
;
1392 for (i
= size_a
; --i
>= 0; ) {
1394 for (j
= 0; j
< size
; j
++) {
1395 twodigits z
= (twodigits
)pout
[j
] << PyLong_SHIFT
| hi
;
1396 hi
= (digit
)(z
/ _PyLong_DECIMAL_BASE
);
1397 pout
[j
] = (digit
)(z
- (twodigits
)hi
*
1398 _PyLong_DECIMAL_BASE
);
1401 pout
[size
++] = hi
% _PyLong_DECIMAL_BASE
;
1402 hi
/= _PyLong_DECIMAL_BASE
;
1404 /* check for keyboard interrupt */
1410 /* pout should have at least one digit, so that the case when a = 0
1415 /* calculate exact length of output string, and allocate */
1416 strlen
= (addL
!= 0) + negative
+
1417 1 + (size
- 1) * _PyLong_DECIMAL_SHIFT
;
1420 while (rem
>= tenpow
) {
1424 str
= PyString_FromStringAndSize(NULL
, strlen
);
1430 /* fill the string right-to-left */
1431 p
= PyString_AS_STRING(str
) + strlen
;
1435 /* pout[0] through pout[size-2] contribute exactly
1436 _PyLong_DECIMAL_SHIFT digits each */
1437 for (i
=0; i
< size
- 1; i
++) {
1439 for (j
= 0; j
< _PyLong_DECIMAL_SHIFT
; j
++) {
1440 *--p
= '0' + rem
% 10;
1444 /* pout[size-1]: always produce at least one decimal digit */
1447 *--p
= '0' + rem
% 10;
1455 /* check we've counted correctly */
1456 assert(p
== PyString_AS_STRING(str
));
1458 return (PyObject
*)str
;
1461 /* Convert the long to a string object with given base,
1462 appending a base prefix of 0[box] if base is 2, 8 or 16.
1463 Add a trailing "L" if addL is non-zero.
1464 If newstyle is zero, then use the pre-2.6 behavior of octal having
1465 a leading "0", instead of the prefix "0o" */
1466 PyAPI_FUNC(PyObject
*)
1467 _PyLong_Format(PyObject
*aa
, int base
, int addL
, int newstyle
)
1469 register PyLongObject
*a
= (PyLongObject
*)aa
;
1470 PyStringObject
*str
;
1478 return long_to_decimal_string((PyObject
*)a
, addL
);
1480 if (a
== NULL
|| !PyLong_Check(a
)) {
1481 PyErr_BadInternalCall();
1484 assert(base
>= 2 && base
<= 36);
1485 size_a
= ABS(Py_SIZE(a
));
1487 /* Compute a rough upper bound for the length of the string */
1494 i
= 5 + (addL
? 1 : 0);
1495 /* ensure we don't get signed overflow in sz calculation */
1496 if (size_a
> (PY_SSIZE_T_MAX
- i
) / PyLong_SHIFT
) {
1497 PyErr_SetString(PyExc_OverflowError
,
1498 "long is too large to format");
1501 sz
= i
+ 1 + (size_a
* PyLong_SHIFT
- 1) / bits
;
1503 str
= (PyStringObject
*) PyString_FromStringAndSize((char *)0, sz
);
1506 p
= PyString_AS_STRING(str
) + sz
;
1513 if (a
->ob_size
== 0) {
1516 else if ((base
& (base
- 1)) == 0) {
1517 /* JRH: special case for power-of-2 bases */
1518 twodigits accum
= 0;
1519 int accumbits
= 0; /* # of bits in accum */
1520 int basebits
= 1; /* # of bits in base-1 */
1522 while ((i
>>= 1) > 1)
1525 for (i
= 0; i
< size_a
; ++i
) {
1526 accum
|= (twodigits
)a
->ob_digit
[i
] << accumbits
;
1527 accumbits
+= PyLong_SHIFT
;
1528 assert(accumbits
>= basebits
);
1530 char cdigit
= (char)(accum
& (base
- 1));
1531 cdigit
+= (cdigit
< 10) ? '0' : 'a'-10;
1532 assert(p
> PyString_AS_STRING(str
));
1534 accumbits
-= basebits
;
1536 } while (i
< size_a
-1 ? accumbits
>= basebits
: accum
> 0);
1540 /* Not 0, and base not a power of 2. Divide repeatedly by
1541 base, but for speed use the highest power of base that
1543 Py_ssize_t size
= size_a
;
1544 digit
*pin
= a
->ob_digit
;
1545 PyLongObject
*scratch
;
1546 /* powbasw <- largest power of base that fits in a digit. */
1547 digit powbase
= base
; /* powbase == base ** power */
1550 twodigits newpow
= powbase
* (twodigits
)base
;
1551 if (newpow
>> PyLong_SHIFT
)
1552 /* doesn't fit in a digit */
1554 powbase
= (digit
)newpow
;
1558 /* Get a scratch area for repeated division. */
1559 scratch
= _PyLong_New(size
);
1560 if (scratch
== NULL
) {
1565 /* Repeatedly divide by powbase. */
1567 int ntostore
= power
;
1568 digit rem
= inplace_divrem1(scratch
->ob_digit
,
1569 pin
, size
, powbase
);
1570 pin
= scratch
->ob_digit
; /* no need to use a again */
1571 if (pin
[size
- 1] == 0)
1579 /* Break rem into digits. */
1580 assert(ntostore
> 0);
1582 digit nextrem
= (digit
)(rem
/ base
);
1583 char c
= (char)(rem
- nextrem
* base
);
1584 assert(p
> PyString_AS_STRING(str
));
1585 c
+= (c
< 10) ? '0' : 'a'-10;
1589 /* Termination is a bit delicate: must not
1590 store leading zeroes, so must get out if
1591 remaining quotient and rem are both 0. */
1592 } while (ntostore
&& (size
|| rem
));
1593 } while (size
!= 0);
1601 else if (base
== 8) {
1610 else if (base
== 16) {
1614 else if (base
!= 10) {
1616 *--p
= '0' + base
%10;
1618 *--p
= '0' + base
/10;
1622 if (p
!= PyString_AS_STRING(str
)) {
1623 char *q
= PyString_AS_STRING(str
);
1626 } while ((*q
++ = *p
++) != '\0');
1628 _PyString_Resize((PyObject
**)&str
,
1629 (Py_ssize_t
) (q
- PyString_AS_STRING(str
)));
1631 return (PyObject
*)str
;
1634 /* Table of digit values for 8-bit string -> integer conversion.
1635 * '0' maps to 0, ..., '9' maps to 9.
1636 * 'a' and 'A' map to 10, ..., 'z' and 'Z' map to 35.
1637 * All other indices map to 37.
1638 * Note that when converting a base B string, a char c is a legitimate
1639 * base B digit iff _PyLong_DigitValue[Py_CHARMASK(c)] < B.
1641 int _PyLong_DigitValue
[256] = {
1642 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1643 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1644 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1645 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 37, 37, 37, 37, 37, 37,
1646 37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
1647 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
1648 37, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
1649 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 37, 37, 37, 37,
1650 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1651 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1652 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1653 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1654 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1655 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1656 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1657 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
1660 /* *str points to the first digit in a string of base `base` digits. base
1661 * is a power of 2 (2, 4, 8, 16, or 32). *str is set to point to the first
1662 * non-digit (which may be *str!). A normalized long is returned.
1663 * The point to this routine is that it takes time linear in the number of
1664 * string characters.
1666 static PyLongObject
*
1667 long_from_binary_base(char **str
, int base
)
1678 assert(base
>= 2 && base
<= 32 && (base
& (base
- 1)) == 0);
1680 for (bits_per_char
= -1; n
; ++bits_per_char
)
1682 /* n <- total # of bits needed, while setting p to end-of-string */
1683 while (_PyLong_DigitValue
[Py_CHARMASK(*p
)] < base
)
1686 /* n <- # of Python digits needed, = ceiling(n/PyLong_SHIFT). */
1687 n
= (p
- start
) * bits_per_char
+ PyLong_SHIFT
- 1;
1688 if (n
/ bits_per_char
< p
- start
) {
1689 PyErr_SetString(PyExc_ValueError
,
1690 "long string too large to convert");
1693 n
= n
/ PyLong_SHIFT
;
1697 /* Read string from right, and fill in long from left; i.e.,
1698 * from least to most significant in both.
1702 pdigit
= z
->ob_digit
;
1703 while (--p
>= start
) {
1704 int k
= _PyLong_DigitValue
[Py_CHARMASK(*p
)];
1705 assert(k
>= 0 && k
< base
);
1706 accum
|= (twodigits
)k
<< bits_in_accum
;
1707 bits_in_accum
+= bits_per_char
;
1708 if (bits_in_accum
>= PyLong_SHIFT
) {
1709 *pdigit
++ = (digit
)(accum
& PyLong_MASK
);
1710 assert(pdigit
- z
->ob_digit
<= n
);
1711 accum
>>= PyLong_SHIFT
;
1712 bits_in_accum
-= PyLong_SHIFT
;
1713 assert(bits_in_accum
< PyLong_SHIFT
);
1716 if (bits_in_accum
) {
1717 assert(bits_in_accum
<= PyLong_SHIFT
);
1718 *pdigit
++ = (digit
)accum
;
1719 assert(pdigit
- z
->ob_digit
<= n
);
1721 while (pdigit
- z
->ob_digit
< n
)
1723 return long_normalize(z
);
1727 PyLong_FromString(char *str
, char **pend
, int base
)
1730 char *start
, *orig_str
= str
;
1732 PyObject
*strobj
, *strrepr
;
1735 if ((base
!= 0 && base
< 2) || base
> 36) {
1736 PyErr_SetString(PyExc_ValueError
,
1737 "long() arg 2 must be >= 2 and <= 36");
1740 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1744 else if (*str
== '-') {
1748 while (*str
!= '\0' && isspace(Py_CHARMASK(*str
)))
1751 /* No base given. Deduce the base from the contents
1755 else if (str
[1] == 'x' || str
[1] == 'X')
1757 else if (str
[1] == 'o' || str
[1] == 'O')
1759 else if (str
[1] == 'b' || str
[1] == 'B')
1762 /* "old" (C-style) octal literal, still valid in
1763 2.x, although illegal in 3.x */
1766 /* Whether or not we were deducing the base, skip leading chars
1768 if (str
[0] == '0' &&
1769 ((base
== 16 && (str
[1] == 'x' || str
[1] == 'X')) ||
1770 (base
== 8 && (str
[1] == 'o' || str
[1] == 'O')) ||
1771 (base
== 2 && (str
[1] == 'b' || str
[1] == 'B'))))
1775 if ((base
& (base
- 1)) == 0)
1776 z
= long_from_binary_base(&str
, base
);
1779 Binary bases can be converted in time linear in the number of digits, because
1780 Python's representation base is binary. Other bases (including decimal!) use
1781 the simple quadratic-time algorithm below, complicated by some speed tricks.
1783 First some math: the largest integer that can be expressed in N base-B digits
1784 is B**N-1. Consequently, if we have an N-digit input in base B, the worst-
1785 case number of Python digits needed to hold it is the smallest integer n s.t.
1787 PyLong_BASE**n-1 >= B**N-1 [or, adding 1 to both sides]
1788 PyLong_BASE**n >= B**N [taking logs to base PyLong_BASE]
1789 n >= log(B**N)/log(PyLong_BASE) = N * log(B)/log(PyLong_BASE)
1791 The static array log_base_PyLong_BASE[base] == log(base)/log(PyLong_BASE) so
1792 we can compute this quickly. A Python long with that much space is reserved
1793 near the start, and the result is computed into it.
1795 The input string is actually treated as being in base base**i (i.e., i digits
1796 are processed at a time), where two more static arrays hold:
1798 convwidth_base[base] = the largest integer i such that
1799 base**i <= PyLong_BASE
1800 convmultmax_base[base] = base ** convwidth_base[base]
1802 The first of these is the largest i such that i consecutive input digits
1803 must fit in a single Python digit. The second is effectively the input
1804 base we're really using.
1806 Viewing the input as a sequence <c0, c1, ..., c_n-1> of digits in base
1807 convmultmax_base[base], the result is "simply"
1809 (((c0*B + c1)*B + c2)*B + c3)*B + ... ))) + c_n-1
1811 where B = convmultmax_base[base].
1813 Error analysis: as above, the number of Python digits `n` needed is worst-
1816 n >= N * log(B)/log(PyLong_BASE)
1818 where `N` is the number of input digits in base `B`. This is computed via
1820 size_z = (Py_ssize_t)((scan - str) * log_base_PyLong_BASE[base]) + 1;
1822 below. Two numeric concerns are how much space this can waste, and whether
1823 the computed result can be too small. To be concrete, assume PyLong_BASE =
1824 2**15, which is the default (and it's unlikely anyone changes that).
1826 Waste isn't a problem: provided the first input digit isn't 0, the difference
1827 between the worst-case input with N digits and the smallest input with N
1828 digits is about a factor of B, but B is small compared to PyLong_BASE so at
1829 most one allocated Python digit can remain unused on that count. If
1830 N*log(B)/log(PyLong_BASE) is mathematically an exact integer, then truncating
1831 that and adding 1 returns a result 1 larger than necessary. However, that
1832 can't happen: whenever B is a power of 2, long_from_binary_base() is called
1833 instead, and it's impossible for B**i to be an integer power of 2**15 when B
1834 is not a power of 2 (i.e., it's impossible for N*log(B)/log(PyLong_BASE) to be
1835 an exact integer when B is not a power of 2, since B**i has a prime factor
1836 other than 2 in that case, but (2**15)**j's only prime factor is 2).
1838 The computed result can be too small if the true value of
1839 N*log(B)/log(PyLong_BASE) is a little bit larger than an exact integer, but
1840 due to roundoff errors (in computing log(B), log(PyLong_BASE), their quotient,
1841 and/or multiplying that by N) yields a numeric result a little less than that
1842 integer. Unfortunately, "how close can a transcendental function get to an
1843 integer over some range?" questions are generally theoretically intractable.
1844 Computer analysis via continued fractions is practical: expand
1845 log(B)/log(PyLong_BASE) via continued fractions, giving a sequence i/j of "the
1846 best" rational approximations. Then j*log(B)/log(PyLong_BASE) is
1847 approximately equal to (the integer) i. This shows that we can get very close
1848 to being in trouble, but very rarely. For example, 76573 is a denominator in
1849 one of the continued-fraction approximations to log(10)/log(2**15), and
1852 >>> log(10)/log(2**15)*76573
1855 is very close to an integer. If we were working with IEEE single-precision,
1856 rounding errors could kill us. Finding worst cases in IEEE double-precision
1857 requires better-than-double-precision log() functions, and Tim didn't bother.
1858 Instead the code checks to see whether the allocated space is enough as each
1859 new Python digit is added, and copies the whole thing to a larger long if not.
1860 This should happen extremely rarely, and in fact I don't have a test case
1861 that triggers it(!). Instead the code was tested by artificially allocating
1862 just 1 digit at the start, so that the copying code was exercised for every
1863 digit beyond the first.
1865 register twodigits c
; /* current input character */
1869 twodigits convmultmax
, convmult
;
1873 static double log_base_PyLong_BASE
[37] = {0.0e0
,};
1874 static int convwidth_base
[37] = {0,};
1875 static twodigits convmultmax_base
[37] = {0,};
1877 if (log_base_PyLong_BASE
[base
] == 0.0) {
1878 twodigits convmax
= base
;
1881 log_base_PyLong_BASE
[base
] = (log((double)base
) /
1882 log((double)PyLong_BASE
));
1884 twodigits next
= convmax
* base
;
1885 if (next
> PyLong_BASE
)
1890 convmultmax_base
[base
] = convmax
;
1892 convwidth_base
[base
] = i
;
1895 /* Find length of the string of numeric characters. */
1897 while (_PyLong_DigitValue
[Py_CHARMASK(*scan
)] < base
)
1900 /* Create a long object that can contain the largest possible
1901 * integer with this base and length. Note that there's no
1902 * need to initialize z->ob_digit -- no slot is read up before
1903 * being stored into.
1905 size_z
= (Py_ssize_t
)((scan
- str
) * log_base_PyLong_BASE
[base
]) + 1;
1906 /* Uncomment next line to test exceedingly rare copy code */
1909 z
= _PyLong_New(size_z
);
1914 /* `convwidth` consecutive input digits are treated as a single
1915 * digit in base `convmultmax`.
1917 convwidth
= convwidth_base
[base
];
1918 convmultmax
= convmultmax_base
[base
];
1921 while (str
< scan
) {
1922 /* grab up to convwidth digits from the input string */
1923 c
= (digit
)_PyLong_DigitValue
[Py_CHARMASK(*str
++)];
1924 for (i
= 1; i
< convwidth
&& str
!= scan
; ++i
, ++str
) {
1925 c
= (twodigits
)(c
* base
+
1926 _PyLong_DigitValue
[Py_CHARMASK(*str
)]);
1927 assert(c
< PyLong_BASE
);
1930 convmult
= convmultmax
;
1931 /* Calculate the shift only if we couldn't get
1934 if (i
!= convwidth
) {
1940 /* Multiply z by convmult, and add c. */
1942 pzstop
= pz
+ Py_SIZE(z
);
1943 for (; pz
< pzstop
; ++pz
) {
1944 c
+= (twodigits
)*pz
* convmult
;
1945 *pz
= (digit
)(c
& PyLong_MASK
);
1948 /* carry off the current end? */
1950 assert(c
< PyLong_BASE
);
1951 if (Py_SIZE(z
) < size_z
) {
1957 /* Extremely rare. Get more space. */
1958 assert(Py_SIZE(z
) == size_z
);
1959 tmp
= _PyLong_New(size_z
+ 1);
1964 memcpy(tmp
->ob_digit
,
1966 sizeof(digit
) * size_z
);
1969 z
->ob_digit
[size_z
] = (digit
)c
;
1980 Py_SIZE(z
) = -(Py_SIZE(z
));
1981 if (*str
== 'L' || *str
== 'l')
1983 while (*str
&& isspace(Py_CHARMASK(*str
)))
1989 return (PyObject
*) z
;
1993 slen
= strlen(orig_str
) < 200 ? strlen(orig_str
) : 200;
1994 strobj
= PyString_FromStringAndSize(orig_str
, slen
);
1997 strrepr
= PyObject_Repr(strobj
);
1999 if (strrepr
== NULL
)
2001 PyErr_Format(PyExc_ValueError
,
2002 "invalid literal for long() with base %d: %s",
2003 base
, PyString_AS_STRING(strrepr
));
2008 #ifdef Py_USING_UNICODE
2010 PyLong_FromUnicode(Py_UNICODE
*u
, Py_ssize_t length
, int base
)
2013 char *buffer
= (char *)PyMem_MALLOC(length
+1);
2018 if (PyUnicode_EncodeDecimal(u
, length
, buffer
, NULL
)) {
2022 result
= PyLong_FromString(buffer
, NULL
, base
);
2029 static PyLongObject
*x_divrem
2030 (PyLongObject
*, PyLongObject
*, PyLongObject
**);
2031 static PyObject
*long_long(PyObject
*v
);
2033 /* Long division with remainder, top-level routine */
2036 long_divrem(PyLongObject
*a
, PyLongObject
*b
,
2037 PyLongObject
**pdiv
, PyLongObject
**prem
)
2039 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2043 PyErr_SetString(PyExc_ZeroDivisionError
,
2044 "long division or modulo by zero");
2047 if (size_a
< size_b
||
2048 (size_a
== size_b
&&
2049 a
->ob_digit
[size_a
-1] < b
->ob_digit
[size_b
-1])) {
2051 *pdiv
= _PyLong_New(0);
2055 *prem
= (PyLongObject
*) a
;
2060 z
= divrem1(a
, b
->ob_digit
[0], &rem
);
2063 *prem
= (PyLongObject
*) PyLong_FromLong((long)rem
);
2064 if (*prem
== NULL
) {
2070 z
= x_divrem(a
, b
, prem
);
2075 The quotient z has the sign of a*b;
2076 the remainder r has the sign of a,
2078 if ((a
->ob_size
< 0) != (b
->ob_size
< 0))
2079 z
->ob_size
= -(z
->ob_size
);
2080 if (a
->ob_size
< 0 && (*prem
)->ob_size
!= 0)
2081 (*prem
)->ob_size
= -((*prem
)->ob_size
);
2086 /* Unsigned long division with remainder -- the algorithm. The arguments v1
2087 and w1 should satisfy 2 <= ABS(Py_SIZE(w1)) <= ABS(Py_SIZE(v1)). */
2089 static PyLongObject
*
2090 x_divrem(PyLongObject
*v1
, PyLongObject
*w1
, PyLongObject
**prem
)
2092 PyLongObject
*v
, *w
, *a
;
2093 Py_ssize_t i
, k
, size_v
, size_w
;
2095 digit wm1
, wm2
, carry
, q
, r
, vtop
, *v0
, *vk
, *w0
, *ak
;
2100 /* We follow Knuth [The Art of Computer Programming, Vol. 2 (3rd
2101 edn.), section 4.3.1, Algorithm D], except that we don't explicitly
2102 handle the special case when the initial estimate q for a quotient
2103 digit is >= PyLong_BASE: the max value for q is PyLong_BASE+1, and
2104 that won't overflow a digit. */
2106 /* allocate space; w will also be used to hold the final remainder */
2107 size_v
= ABS(Py_SIZE(v1
));
2108 size_w
= ABS(Py_SIZE(w1
));
2109 assert(size_v
>= size_w
&& size_w
>= 2); /* Assert checks by div() */
2110 v
= _PyLong_New(size_v
+1);
2115 w
= _PyLong_New(size_w
);
2122 /* normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
2123 shift v1 left by the same amount. Results go into w and v. */
2124 d
= PyLong_SHIFT
- bits_in_digit(w1
->ob_digit
[size_w
-1]);
2125 carry
= v_lshift(w
->ob_digit
, w1
->ob_digit
, size_w
, d
);
2127 carry
= v_lshift(v
->ob_digit
, v1
->ob_digit
, size_v
, d
);
2128 if (carry
!= 0 || v
->ob_digit
[size_v
-1] >= w
->ob_digit
[size_w
-1]) {
2129 v
->ob_digit
[size_v
] = carry
;
2133 /* Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
2134 at most (and usually exactly) k = size_v - size_w digits. */
2135 k
= size_v
- size_w
;
2148 for (vk
= v0
+k
, ak
= a
->ob_digit
+ k
; vk
-- > v0
;) {
2149 /* inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
2150 single-digit quotient q, remainder in vk[0:size_w]. */
2160 /* estimate quotient digit q; may overestimate by 1 (rare) */
2162 assert(vtop
<= wm1
);
2163 vv
= ((twodigits
)vtop
<< PyLong_SHIFT
) | vk
[size_w
-1];
2164 q
= (digit
)(vv
/ wm1
);
2165 r
= (digit
)(vv
- (twodigits
)wm1
* q
); /* r = vv % wm1 */
2166 while ((twodigits
)wm2
* q
> (((twodigits
)r
<< PyLong_SHIFT
)
2170 if (r
>= PyLong_BASE
)
2173 assert(q
<= PyLong_BASE
);
2175 /* subtract q*w0[0:size_w] from vk[0:size_w+1] */
2177 for (i
= 0; i
< size_w
; ++i
) {
2178 /* invariants: -PyLong_BASE <= -q <= zhi <= 0;
2179 -PyLong_BASE * q <= z < PyLong_BASE */
2180 z
= (sdigit
)vk
[i
] + zhi
-
2181 (stwodigits
)q
* (stwodigits
)w0
[i
];
2182 vk
[i
] = (digit
)z
& PyLong_MASK
;
2183 zhi
= (sdigit
)Py_ARITHMETIC_RIGHT_SHIFT(stwodigits
,
2187 /* add w back if q was too large (this branch taken rarely) */
2188 assert((sdigit
)vtop
+ zhi
== -1 || (sdigit
)vtop
+ zhi
== 0);
2189 if ((sdigit
)vtop
+ zhi
< 0) {
2191 for (i
= 0; i
< size_w
; ++i
) {
2192 carry
+= vk
[i
] + w0
[i
];
2193 vk
[i
] = carry
& PyLong_MASK
;
2194 carry
>>= PyLong_SHIFT
;
2199 /* store quotient digit */
2200 assert(q
< PyLong_BASE
);
2204 /* unshift remainder; we reuse w to store the result */
2205 carry
= v_rshift(w0
, v0
, size_w
, d
);
2209 *prem
= long_normalize(w
);
2210 return long_normalize(a
);
2213 /* For a nonzero PyLong a, express a in the form x * 2**e, with 0.5 <=
2214 abs(x) < 1.0 and e >= 0; return x and put e in *e. Here x is
2215 rounded to DBL_MANT_DIG significant bits using round-half-to-even.
2216 If a == 0, return 0.0 and set *e = 0. If the resulting exponent
2217 e is larger than PY_SSIZE_T_MAX, raise OverflowError and return
2220 /* attempt to define 2.0**DBL_MANT_DIG as a compile-time constant */
2221 #if DBL_MANT_DIG == 53
2222 #define EXP2_DBL_MANT_DIG 9007199254740992.0
2224 #define EXP2_DBL_MANT_DIG (ldexp(1.0, DBL_MANT_DIG))
2228 _PyLong_Frexp(PyLongObject
*a
, Py_ssize_t
*e
)
2230 Py_ssize_t a_size
, a_bits
, shift_digits
, shift_bits
, x_size
;
2231 /* See below for why x_digits is always large enough. */
2232 digit rem
, x_digits
[2 + (DBL_MANT_DIG
+ 1) / PyLong_SHIFT
];
2234 /* Correction term for round-half-to-even rounding. For a digit x,
2235 "x + half_even_correction[x & 7]" gives x rounded to the nearest
2236 multiple of 4, rounding ties to a multiple of 8. */
2237 static const int half_even_correction
[8] = {0, -1, -2, 1, 0, -1, 2, 1};
2239 a_size
= ABS(Py_SIZE(a
));
2241 /* Special case for 0: significand 0.0, exponent 0. */
2245 a_bits
= bits_in_digit(a
->ob_digit
[a_size
-1]);
2246 /* The following is an overflow-free version of the check
2247 "if ((a_size - 1) * PyLong_SHIFT + a_bits > PY_SSIZE_T_MAX) ..." */
2248 if (a_size
>= (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 &&
2249 (a_size
> (PY_SSIZE_T_MAX
- 1) / PyLong_SHIFT
+ 1 ||
2250 a_bits
> (PY_SSIZE_T_MAX
- 1) % PyLong_SHIFT
+ 1))
2252 a_bits
= (a_size
- 1) * PyLong_SHIFT
+ a_bits
;
2254 /* Shift the first DBL_MANT_DIG + 2 bits of a into x_digits[0:x_size]
2255 (shifting left if a_bits <= DBL_MANT_DIG + 2).
2257 Number of digits needed for result: write // for floor division.
2258 Then if shifting left, we end up using
2260 1 + a_size + (DBL_MANT_DIG + 2 - a_bits) // PyLong_SHIFT
2262 digits. If shifting right, we use
2264 a_size - (a_bits - DBL_MANT_DIG - 2) // PyLong_SHIFT
2266 digits. Using a_size = 1 + (a_bits - 1) // PyLong_SHIFT along with
2269 m // PyLong_SHIFT + n // PyLong_SHIFT <= (m + n) // PyLong_SHIFT
2270 m // PyLong_SHIFT - n // PyLong_SHIFT <=
2271 1 + (m - n - 1) // PyLong_SHIFT,
2273 valid for any integers m and n, we find that x_size satisfies
2275 x_size <= 2 + (DBL_MANT_DIG + 1) // PyLong_SHIFT
2279 if (a_bits
<= DBL_MANT_DIG
+ 2) {
2280 shift_digits
= (DBL_MANT_DIG
+ 2 - a_bits
) / PyLong_SHIFT
;
2281 shift_bits
= (DBL_MANT_DIG
+ 2 - a_bits
) % PyLong_SHIFT
;
2283 while (x_size
< shift_digits
)
2284 x_digits
[x_size
++] = 0;
2285 rem
= v_lshift(x_digits
+ x_size
, a
->ob_digit
, a_size
,
2288 x_digits
[x_size
++] = rem
;
2291 shift_digits
= (a_bits
- DBL_MANT_DIG
- 2) / PyLong_SHIFT
;
2292 shift_bits
= (a_bits
- DBL_MANT_DIG
- 2) % PyLong_SHIFT
;
2293 rem
= v_rshift(x_digits
, a
->ob_digit
+ shift_digits
,
2294 a_size
- shift_digits
, (int)shift_bits
);
2295 x_size
= a_size
- shift_digits
;
2296 /* For correct rounding below, we need the least significant
2297 bit of x to be 'sticky' for this shift: if any of the bits
2298 shifted out was nonzero, we set the least significant bit
2303 while (shift_digits
> 0)
2304 if (a
->ob_digit
[--shift_digits
]) {
2309 assert(1 <= x_size
&&
2310 x_size
<= (Py_ssize_t
)(sizeof(x_digits
)/sizeof(digit
)));
2312 /* Round, and convert to double. */
2313 x_digits
[0] += half_even_correction
[x_digits
[0] & 7];
2314 dx
= x_digits
[--x_size
];
2316 dx
= dx
* PyLong_BASE
+ x_digits
[--x_size
];
2318 /* Rescale; make correction if result is 1.0. */
2319 dx
/= 4.0 * EXP2_DBL_MANT_DIG
;
2321 if (a_bits
== PY_SSIZE_T_MAX
)
2328 return Py_SIZE(a
) < 0 ? -dx
: dx
;
2331 /* exponent > PY_SSIZE_T_MAX */
2332 PyErr_SetString(PyExc_OverflowError
,
2333 "huge integer: number of bits overflows a Py_ssize_t");
2338 /* Get a C double from a long int object. Rounds to the nearest double,
2339 using the round-half-to-even rule in the case of a tie. */
2342 PyLong_AsDouble(PyObject
*v
)
2344 Py_ssize_t exponent
;
2347 if (v
== NULL
|| !PyLong_Check(v
)) {
2348 PyErr_BadInternalCall();
2351 x
= _PyLong_Frexp((PyLongObject
*)v
, &exponent
);
2352 if ((x
== -1.0 && PyErr_Occurred()) || exponent
> DBL_MAX_EXP
) {
2353 PyErr_SetString(PyExc_OverflowError
,
2354 "long int too large to convert to float");
2357 return ldexp(x
, (int)exponent
);
2363 long_dealloc(PyObject
*v
)
2365 Py_TYPE(v
)->tp_free(v
);
2369 long_repr(PyObject
*v
)
2371 return _PyLong_Format(v
, 10, 1, 0);
2375 long_str(PyObject
*v
)
2377 return _PyLong_Format(v
, 10, 0, 0);
2381 long_compare(PyLongObject
*a
, PyLongObject
*b
)
2385 if (Py_SIZE(a
) != Py_SIZE(b
)) {
2386 sign
= Py_SIZE(a
) - Py_SIZE(b
);
2389 Py_ssize_t i
= ABS(Py_SIZE(a
));
2390 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2395 sign
= (sdigit
)a
->ob_digit
[i
] - (sdigit
)b
->ob_digit
[i
];
2400 return sign
< 0 ? -1 : sign
> 0 ? 1 : 0;
2404 long_hash(PyLongObject
*v
)
2410 /* This is designed so that Python ints and longs with the
2411 same value hash to the same value, otherwise comparisons
2412 of mapping keys will turn out weird */
2420 /* The following loop produces a C unsigned long x such that x is
2421 congruent to the absolute value of v modulo ULONG_MAX. The
2422 resulting x is nonzero if and only if v is. */
2424 /* Force a native long #-bits (32 or 64) circular shift */
2425 x
= (x
>> (8*SIZEOF_LONG
-PyLong_SHIFT
)) | (x
<< PyLong_SHIFT
);
2426 x
+= v
->ob_digit
[i
];
2427 /* If the addition above overflowed we compensate by
2428 incrementing. This preserves the value modulo
2430 if (x
< v
->ob_digit
[i
])
2434 if (x
== (unsigned long)-1)
2435 x
= (unsigned long)-2;
2440 /* Add the absolute values of two long integers. */
2442 static PyLongObject
*
2443 x_add(PyLongObject
*a
, PyLongObject
*b
)
2445 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2450 /* Ensure a is the larger of the two: */
2451 if (size_a
< size_b
) {
2452 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2453 { Py_ssize_t size_temp
= size_a
;
2455 size_b
= size_temp
; }
2457 z
= _PyLong_New(size_a
+1);
2460 for (i
= 0; i
< size_b
; ++i
) {
2461 carry
+= a
->ob_digit
[i
] + b
->ob_digit
[i
];
2462 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2463 carry
>>= PyLong_SHIFT
;
2465 for (; i
< size_a
; ++i
) {
2466 carry
+= a
->ob_digit
[i
];
2467 z
->ob_digit
[i
] = carry
& PyLong_MASK
;
2468 carry
>>= PyLong_SHIFT
;
2470 z
->ob_digit
[i
] = carry
;
2471 return long_normalize(z
);
2474 /* Subtract the absolute values of two integers. */
2476 static PyLongObject
*
2477 x_sub(PyLongObject
*a
, PyLongObject
*b
)
2479 Py_ssize_t size_a
= ABS(Py_SIZE(a
)), size_b
= ABS(Py_SIZE(b
));
2485 /* Ensure a is the larger of the two: */
2486 if (size_a
< size_b
) {
2488 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2489 { Py_ssize_t size_temp
= size_a
;
2491 size_b
= size_temp
; }
2493 else if (size_a
== size_b
) {
2494 /* Find highest digit where a and b differ: */
2496 while (--i
>= 0 && a
->ob_digit
[i
] == b
->ob_digit
[i
])
2499 return _PyLong_New(0);
2500 if (a
->ob_digit
[i
] < b
->ob_digit
[i
]) {
2502 { PyLongObject
*temp
= a
; a
= b
; b
= temp
; }
2504 size_a
= size_b
= i
+1;
2506 z
= _PyLong_New(size_a
);
2509 for (i
= 0; i
< size_b
; ++i
) {
2510 /* The following assumes unsigned arithmetic
2511 works module 2**N for some N>PyLong_SHIFT. */
2512 borrow
= a
->ob_digit
[i
] - b
->ob_digit
[i
] - borrow
;
2513 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2514 borrow
>>= PyLong_SHIFT
;
2515 borrow
&= 1; /* Keep only one sign bit */
2517 for (; i
< size_a
; ++i
) {
2518 borrow
= a
->ob_digit
[i
] - borrow
;
2519 z
->ob_digit
[i
] = borrow
& PyLong_MASK
;
2520 borrow
>>= PyLong_SHIFT
;
2521 borrow
&= 1; /* Keep only one sign bit */
2523 assert(borrow
== 0);
2525 z
->ob_size
= -(z
->ob_size
);
2526 return long_normalize(z
);
2530 long_add(PyLongObject
*v
, PyLongObject
*w
)
2532 PyLongObject
*a
, *b
, *z
;
2534 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2536 if (a
->ob_size
< 0) {
2537 if (b
->ob_size
< 0) {
2539 if (z
!= NULL
&& z
->ob_size
!= 0)
2540 z
->ob_size
= -(z
->ob_size
);
2553 return (PyObject
*)z
;
2557 long_sub(PyLongObject
*v
, PyLongObject
*w
)
2559 PyLongObject
*a
, *b
, *z
;
2561 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
2563 if (a
->ob_size
< 0) {
2568 if (z
!= NULL
&& z
->ob_size
!= 0)
2569 z
->ob_size
= -(z
->ob_size
);
2579 return (PyObject
*)z
;
2582 /* Grade school multiplication, ignoring the signs.
2583 * Returns the absolute value of the product, or NULL if error.
2585 static PyLongObject
*
2586 x_mul(PyLongObject
*a
, PyLongObject
*b
)
2589 Py_ssize_t size_a
= ABS(Py_SIZE(a
));
2590 Py_ssize_t size_b
= ABS(Py_SIZE(b
));
2593 z
= _PyLong_New(size_a
+ size_b
);
2597 memset(z
->ob_digit
, 0, Py_SIZE(z
) * sizeof(digit
));
2599 /* Efficient squaring per HAC, Algorithm 14.16:
2600 * http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
2601 * Gives slightly less than a 2x speedup when a == b,
2602 * via exploiting that each entry in the multiplication
2603 * pyramid appears twice (except for the size_a squares).
2605 for (i
= 0; i
< size_a
; ++i
) {
2607 twodigits f
= a
->ob_digit
[i
];
2608 digit
*pz
= z
->ob_digit
+ (i
<< 1);
2609 digit
*pa
= a
->ob_digit
+ i
+ 1;
2610 digit
*paend
= a
->ob_digit
+ size_a
;
2617 carry
= *pz
+ f
* f
;
2618 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2619 carry
>>= PyLong_SHIFT
;
2620 assert(carry
<= PyLong_MASK
);
2622 /* Now f is added in twice in each column of the
2623 * pyramid it appears. Same as adding f<<1 once.
2626 while (pa
< paend
) {
2627 carry
+= *pz
+ *pa
++ * f
;
2628 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2629 carry
>>= PyLong_SHIFT
;
2630 assert(carry
<= (PyLong_MASK
<< 1));
2634 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2635 carry
>>= PyLong_SHIFT
;
2638 *pz
+= (digit
)(carry
& PyLong_MASK
);
2639 assert((carry
>> PyLong_SHIFT
) == 0);
2642 else { /* a is not the same as b -- gradeschool long mult */
2643 for (i
= 0; i
< size_a
; ++i
) {
2644 twodigits carry
= 0;
2645 twodigits f
= a
->ob_digit
[i
];
2646 digit
*pz
= z
->ob_digit
+ i
;
2647 digit
*pb
= b
->ob_digit
;
2648 digit
*pbend
= b
->ob_digit
+ size_b
;
2655 while (pb
< pbend
) {
2656 carry
+= *pz
+ *pb
++ * f
;
2657 *pz
++ = (digit
)(carry
& PyLong_MASK
);
2658 carry
>>= PyLong_SHIFT
;
2659 assert(carry
<= PyLong_MASK
);
2662 *pz
+= (digit
)(carry
& PyLong_MASK
);
2663 assert((carry
>> PyLong_SHIFT
) == 0);
2666 return long_normalize(z
);
2669 /* A helper for Karatsuba multiplication (k_mul).
2670 Takes a long "n" and an integer "size" representing the place to
2671 split, and sets low and high such that abs(n) == (high << size) + low,
2672 viewing the shift as being by digits. The sign bit is ignored, and
2673 the return values are >= 0.
2674 Returns 0 on success, -1 on failure.
2677 kmul_split(PyLongObject
*n
,
2679 PyLongObject
**high
,
2682 PyLongObject
*hi
, *lo
;
2683 Py_ssize_t size_lo
, size_hi
;
2684 const Py_ssize_t size_n
= ABS(Py_SIZE(n
));
2686 size_lo
= MIN(size_n
, size
);
2687 size_hi
= size_n
- size_lo
;
2689 if ((hi
= _PyLong_New(size_hi
)) == NULL
)
2691 if ((lo
= _PyLong_New(size_lo
)) == NULL
) {
2696 memcpy(lo
->ob_digit
, n
->ob_digit
, size_lo
* sizeof(digit
));
2697 memcpy(hi
->ob_digit
, n
->ob_digit
+ size_lo
, size_hi
* sizeof(digit
));
2699 *high
= long_normalize(hi
);
2700 *low
= long_normalize(lo
);
2704 static PyLongObject
*k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
);
2706 /* Karatsuba multiplication. Ignores the input signs, and returns the
2707 * absolute value of the product (or NULL if error).
2708 * See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
2710 static PyLongObject
*
2711 k_mul(PyLongObject
*a
, PyLongObject
*b
)
2713 Py_ssize_t asize
= ABS(Py_SIZE(a
));
2714 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2715 PyLongObject
*ah
= NULL
;
2716 PyLongObject
*al
= NULL
;
2717 PyLongObject
*bh
= NULL
;
2718 PyLongObject
*bl
= NULL
;
2719 PyLongObject
*ret
= NULL
;
2720 PyLongObject
*t1
, *t2
, *t3
;
2721 Py_ssize_t shift
; /* the number of digits we split off */
2724 /* (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
2725 * Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
2726 * Then the original product is
2727 * ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
2728 * By picking X to be a power of 2, "*X" is just shifting, and it's
2729 * been reduced to 3 multiplies on numbers half the size.
2732 /* We want to split based on the larger number; fiddle so that b
2735 if (asize
> bsize
) {
2745 /* Use gradeschool math when either number is too small. */
2746 i
= a
== b
? KARATSUBA_SQUARE_CUTOFF
: KARATSUBA_CUTOFF
;
2749 return _PyLong_New(0);
2754 /* If a is small compared to b, splitting on b gives a degenerate
2755 * case with ah==0, and Karatsuba may be (even much) less efficient
2756 * than "grade school" then. However, we can still win, by viewing
2757 * b as a string of "big digits", each of width a->ob_size. That
2758 * leads to a sequence of balanced calls to k_mul.
2760 if (2 * asize
<= bsize
)
2761 return k_lopsided_mul(a
, b
);
2763 /* Split a & b into hi & lo pieces. */
2765 if (kmul_split(a
, shift
, &ah
, &al
) < 0) goto fail
;
2766 assert(Py_SIZE(ah
) > 0); /* the split isn't degenerate */
2774 else if (kmul_split(b
, shift
, &bh
, &bl
) < 0) goto fail
;
2777 * 1. Allocate result space (asize + bsize digits: that's always
2779 * 2. Compute ah*bh, and copy into result at 2*shift.
2780 * 3. Compute al*bl, and copy into result at 0. Note that this
2781 * can't overlap with #2.
2782 * 4. Subtract al*bl from the result, starting at shift. This may
2783 * underflow (borrow out of the high digit), but we don't care:
2784 * we're effectively doing unsigned arithmetic mod
2785 * PyLong_BASE**(sizea + sizeb), and so long as the *final* result fits,
2786 * borrows and carries out of the high digit can be ignored.
2787 * 5. Subtract ah*bh from the result, starting at shift.
2788 * 6. Compute (ah+al)*(bh+bl), and add it into the result starting
2792 /* 1. Allocate result space. */
2793 ret
= _PyLong_New(asize
+ bsize
);
2794 if (ret
== NULL
) goto fail
;
2796 /* Fill with trash, to catch reference to uninitialized digits. */
2797 memset(ret
->ob_digit
, 0xDF, Py_SIZE(ret
) * sizeof(digit
));
2800 /* 2. t1 <- ah*bh, and copy into high digits of result. */
2801 if ((t1
= k_mul(ah
, bh
)) == NULL
) goto fail
;
2802 assert(Py_SIZE(t1
) >= 0);
2803 assert(2*shift
+ Py_SIZE(t1
) <= Py_SIZE(ret
));
2804 memcpy(ret
->ob_digit
+ 2*shift
, t1
->ob_digit
,
2805 Py_SIZE(t1
) * sizeof(digit
));
2807 /* Zero-out the digits higher than the ah*bh copy. */
2808 i
= Py_SIZE(ret
) - 2*shift
- Py_SIZE(t1
);
2810 memset(ret
->ob_digit
+ 2*shift
+ Py_SIZE(t1
), 0,
2813 /* 3. t2 <- al*bl, and copy into the low digits. */
2814 if ((t2
= k_mul(al
, bl
)) == NULL
) {
2818 assert(Py_SIZE(t2
) >= 0);
2819 assert(Py_SIZE(t2
) <= 2*shift
); /* no overlap with high digits */
2820 memcpy(ret
->ob_digit
, t2
->ob_digit
, Py_SIZE(t2
) * sizeof(digit
));
2822 /* Zero out remaining digits. */
2823 i
= 2*shift
- Py_SIZE(t2
); /* number of uninitialized digits */
2825 memset(ret
->ob_digit
+ Py_SIZE(t2
), 0, i
* sizeof(digit
));
2827 /* 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
2828 * because it's fresher in cache.
2830 i
= Py_SIZE(ret
) - shift
; /* # digits after shift */
2831 (void)v_isub(ret
->ob_digit
+ shift
, i
, t2
->ob_digit
, Py_SIZE(t2
));
2834 (void)v_isub(ret
->ob_digit
+ shift
, i
, t1
->ob_digit
, Py_SIZE(t1
));
2837 /* 6. t3 <- (ah+al)(bh+bl), and add into result. */
2838 if ((t1
= x_add(ah
, al
)) == NULL
) goto fail
;
2847 else if ((t2
= x_add(bh
, bl
)) == NULL
) {
2858 if (t3
== NULL
) goto fail
;
2859 assert(Py_SIZE(t3
) >= 0);
2861 /* Add t3. It's not obvious why we can't run out of room here.
2862 * See the (*) comment after this function.
2864 (void)v_iadd(ret
->ob_digit
+ shift
, i
, t3
->ob_digit
, Py_SIZE(t3
));
2867 return long_normalize(ret
);
2878 /* (*) Why adding t3 can't "run out of room" above.
2880 Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
2883 1. For any integer i, i = c(i/2) + f(i/2). In particular,
2884 bsize = c(bsize/2) + f(bsize/2).
2885 2. shift = f(bsize/2)
2887 4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
2888 routine, so asize > bsize/2 >= f(bsize/2) in this routine.
2890 We allocated asize + bsize result digits, and add t3 into them at an offset
2891 of shift. This leaves asize+bsize-shift allocated digit positions for t3
2892 to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
2893 asize + c(bsize/2) available digit positions.
2895 bh has c(bsize/2) digits, and bl at most f(size/2) digits. So bh+hl has
2896 at most c(bsize/2) digits + 1 bit.
2898 If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
2899 digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
2900 most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
2902 The product (ah+al)*(bh+bl) therefore has at most
2904 c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
2906 and we have asize + c(bsize/2) available digit positions. We need to show
2907 this is always enough. An instance of c(bsize/2) cancels out in both, so
2908 the question reduces to whether asize digits is enough to hold
2909 (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
2910 then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
2911 asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
2912 digit is enough to hold 2 bits. This is so since PyLong_SHIFT=15 >= 2. If
2913 asize == bsize, then we're asking whether bsize digits is enough to hold
2914 c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
2915 is enough to hold 2 bits. This is so if bsize >= 2, which holds because
2916 bsize >= KARATSUBA_CUTOFF >= 2.
2918 Note that since there's always enough room for (ah+al)*(bh+bl), and that's
2919 clearly >= each of ah*bh and al*bl, there's always enough room to subtract
2920 ah*bh and al*bl too.
2923 /* b has at least twice the digits of a, and a is big enough that Karatsuba
2924 * would pay off *if* the inputs had balanced sizes. View b as a sequence
2925 * of slices, each with a->ob_size digits, and multiply the slices by a,
2926 * one at a time. This gives k_mul balanced inputs to work with, and is
2927 * also cache-friendly (we compute one double-width slice of the result
2928 * at a time, then move on, never backtracking except for the helpful
2929 * single-width slice overlap between successive partial sums).
2931 static PyLongObject
*
2932 k_lopsided_mul(PyLongObject
*a
, PyLongObject
*b
)
2934 const Py_ssize_t asize
= ABS(Py_SIZE(a
));
2935 Py_ssize_t bsize
= ABS(Py_SIZE(b
));
2936 Py_ssize_t nbdone
; /* # of b digits already multiplied */
2938 PyLongObject
*bslice
= NULL
;
2940 assert(asize
> KARATSUBA_CUTOFF
);
2941 assert(2 * asize
<= bsize
);
2943 /* Allocate result space, and zero it out. */
2944 ret
= _PyLong_New(asize
+ bsize
);
2947 memset(ret
->ob_digit
, 0, Py_SIZE(ret
) * sizeof(digit
));
2949 /* Successive slices of b are copied into bslice. */
2950 bslice
= _PyLong_New(asize
);
2956 PyLongObject
*product
;
2957 const Py_ssize_t nbtouse
= MIN(bsize
, asize
);
2959 /* Multiply the next slice of b by a. */
2960 memcpy(bslice
->ob_digit
, b
->ob_digit
+ nbdone
,
2961 nbtouse
* sizeof(digit
));
2962 Py_SIZE(bslice
) = nbtouse
;
2963 product
= k_mul(a
, bslice
);
2964 if (product
== NULL
)
2967 /* Add into result. */
2968 (void)v_iadd(ret
->ob_digit
+ nbdone
, Py_SIZE(ret
) - nbdone
,
2969 product
->ob_digit
, Py_SIZE(product
));
2977 return long_normalize(ret
);
2986 long_mul(PyLongObject
*v
, PyLongObject
*w
)
2988 PyLongObject
*a
, *b
, *z
;
2990 if (!convert_binop((PyObject
*)v
, (PyObject
*)w
, &a
, &b
)) {
2991 Py_INCREF(Py_NotImplemented
);
2992 return Py_NotImplemented
;
2996 /* Negate if exactly one of the inputs is negative. */
2997 if (((a
->ob_size
^ b
->ob_size
) < 0) && z
)
2998 z
->ob_size
= -(z
->ob_size
);
3001 return (PyObject
*)z
;
3004 /* The / and % operators are now defined in terms of divmod().
3005 The expression a mod b has the value a - b*floor(a/b).
3006 The long_divrem function gives the remainder after division of
3007 |a| by |b|, with the sign of a. This is also expressed
3008 as a - b*trunc(a/b), if trunc truncates towards zero.
3015 So, to get from rem to mod, we have to add b if a and b
3016 have different signs. We then subtract one from the 'div'
3017 part of the outcome to keep the invariant intact. */
3020 * *pdiv, *pmod = divmod(v, w)
3021 * NULL can be passed for pdiv or pmod, in which case that part of
3022 * the result is simply thrown away. The caller owns a reference to
3023 * each of these it requests (does not pass NULL for).
3026 l_divmod(PyLongObject
*v
, PyLongObject
*w
,
3027 PyLongObject
**pdiv
, PyLongObject
**pmod
)
3029 PyLongObject
*div
, *mod
;
3031 if (long_divrem(v
, w
, &div
, &mod
) < 0)
3033 if ((Py_SIZE(mod
) < 0 && Py_SIZE(w
) > 0) ||
3034 (Py_SIZE(mod
) > 0 && Py_SIZE(w
) < 0)) {
3037 temp
= (PyLongObject
*) long_add(mod
, w
);
3044 one
= (PyLongObject
*) PyLong_FromLong(1L);
3046 (temp
= (PyLongObject
*) long_sub(div
, one
)) == NULL
) {
3070 long_div(PyObject
*v
, PyObject
*w
)
3072 PyLongObject
*a
, *b
, *div
;
3074 CONVERT_BINOP(v
, w
, &a
, &b
);
3075 if (l_divmod(a
, b
, &div
, NULL
) < 0)
3079 return (PyObject
*)div
;
3083 long_classic_div(PyObject
*v
, PyObject
*w
)
3085 PyLongObject
*a
, *b
, *div
;
3087 CONVERT_BINOP(v
, w
, &a
, &b
);
3088 if (Py_DivisionWarningFlag
&&
3089 PyErr_Warn(PyExc_DeprecationWarning
, "classic long division") < 0)
3091 else if (l_divmod(a
, b
, &div
, NULL
) < 0)
3095 return (PyObject
*)div
;
3098 /* PyLong/PyLong -> float, with correctly rounded result. */
3100 #define MANT_DIG_DIGITS (DBL_MANT_DIG / PyLong_SHIFT)
3101 #define MANT_DIG_BITS (DBL_MANT_DIG % PyLong_SHIFT)
3104 long_true_divide(PyObject
*v
, PyObject
*w
)
3106 PyLongObject
*a
, *b
, *x
;
3107 Py_ssize_t a_size
, b_size
, shift
, extra_bits
, diff
, x_size
, x_bits
;
3109 int inexact
, negate
, a_is_small
, b_is_small
;
3112 CONVERT_BINOP(v
, w
, &a
, &b
);
3115 Method in a nutshell:
3117 0. reduce to case a, b > 0; filter out obvious underflow/overflow
3118 1. choose a suitable integer 'shift'
3119 2. use integer arithmetic to compute x = floor(2**-shift*a/b)
3120 3. adjust x for correct rounding
3121 4. convert x to a double dx with the same value
3122 5. return ldexp(dx, shift).
3126 0. For any a, a/0 raises ZeroDivisionError; for nonzero b, 0/b
3127 returns either 0.0 or -0.0, depending on the sign of b. For a and
3128 b both nonzero, ignore signs of a and b, and add the sign back in
3129 at the end. Now write a_bits and b_bits for the bit lengths of a
3130 and b respectively (that is, a_bits = 1 + floor(log_2(a)); likewise
3133 2**(a_bits - b_bits - 1) < a/b < 2**(a_bits - b_bits + 1).
3135 So if a_bits - b_bits > DBL_MAX_EXP then a/b > 2**DBL_MAX_EXP and
3136 so overflows. Similarly, if a_bits - b_bits < DBL_MIN_EXP -
3137 DBL_MANT_DIG - 1 then a/b underflows to 0. With these cases out of
3138 the way, we can assume that
3140 DBL_MIN_EXP - DBL_MANT_DIG - 1 <= a_bits - b_bits <= DBL_MAX_EXP.
3142 1. The integer 'shift' is chosen so that x has the right number of
3143 bits for a double, plus two or three extra bits that will be used
3144 in the rounding decisions. Writing a_bits and b_bits for the
3145 number of significant bits in a and b respectively, a
3146 straightforward formula for shift is:
3148 shift = a_bits - b_bits - DBL_MANT_DIG - 2
3150 This is fine in the usual case, but if a/b is smaller than the
3151 smallest normal float then it can lead to double rounding on an
3152 IEEE 754 platform, giving incorrectly rounded results. So we
3153 adjust the formula slightly. The actual formula used is:
3155 shift = MAX(a_bits - b_bits, DBL_MIN_EXP) - DBL_MANT_DIG - 2
3157 2. The quantity x is computed by first shifting a (left -shift bits
3158 if shift <= 0, right shift bits if shift > 0) and then dividing by
3159 b. For both the shift and the division, we keep track of whether
3160 the result is inexact, in a flag 'inexact'; this information is
3161 needed at the rounding stage.
3163 With the choice of shift above, together with our assumption that
3164 a_bits - b_bits >= DBL_MIN_EXP - DBL_MANT_DIG - 1, it follows
3167 3. Now x * 2**shift <= a/b < (x+1) * 2**shift. We want to replace
3168 this with an exactly representable float of the form
3170 round(x/2**extra_bits) * 2**(extra_bits+shift).
3172 For float representability, we need x/2**extra_bits <
3173 2**DBL_MANT_DIG and extra_bits + shift >= DBL_MIN_EXP -
3174 DBL_MANT_DIG. This translates to the condition:
3176 extra_bits >= MAX(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG
3178 To round, we just modify the bottom digit of x in-place; this can
3179 end up giving a digit with value > PyLONG_MASK, but that's not a
3180 problem since digits can hold values up to 2*PyLONG_MASK+1.
3182 With the original choices for shift above, extra_bits will always
3183 be 2 or 3. Then rounding under the round-half-to-even rule, we
3184 round up iff the most significant of the extra bits is 1, and
3185 either: (a) the computation of x in step 2 had an inexact result,
3186 or (b) at least one other of the extra bits is 1, or (c) the least
3187 significant bit of x (above those to be rounded) is 1.
3189 4. Conversion to a double is straightforward; all floating-point
3190 operations involved in the conversion are exact, so there's no
3191 danger of rounding errors.
3193 5. Use ldexp(x, shift) to compute x*2**shift, the final result.
3194 The result will always be exactly representable as a double, except
3195 in the case that it overflows. To avoid dependence on the exact
3196 behaviour of ldexp on overflow, we check for overflow before
3197 applying ldexp. The result of ldexp is adjusted for sign before
3201 /* Reduce to case where a and b are both positive. */
3202 a_size
= ABS(Py_SIZE(a
));
3203 b_size
= ABS(Py_SIZE(b
));
3204 negate
= (Py_SIZE(a
) < 0) ^ (Py_SIZE(b
) < 0);
3206 PyErr_SetString(PyExc_ZeroDivisionError
,
3207 "division by zero");
3211 goto underflow_or_zero
;
3213 /* Fast path for a and b small (exactly representable in a double).
3214 Relies on floating-point division being correctly rounded; results
3215 may be subject to double rounding on x86 machines that operate with
3216 the x87 FPU set to 64-bit precision. */
3217 a_is_small
= a_size
<= MANT_DIG_DIGITS
||
3218 (a_size
== MANT_DIG_DIGITS
+1 &&
3219 a
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3220 b_is_small
= b_size
<= MANT_DIG_DIGITS
||
3221 (b_size
== MANT_DIG_DIGITS
+1 &&
3222 b
->ob_digit
[MANT_DIG_DIGITS
] >> MANT_DIG_BITS
== 0);
3223 if (a_is_small
&& b_is_small
) {
3225 da
= a
->ob_digit
[--a_size
];
3227 da
= da
* PyLong_BASE
+ a
->ob_digit
[--a_size
];
3228 db
= b
->ob_digit
[--b_size
];
3230 db
= db
* PyLong_BASE
+ b
->ob_digit
[--b_size
];
3235 /* Catch obvious cases of underflow and overflow */
3236 diff
= a_size
- b_size
;
3237 if (diff
> PY_SSIZE_T_MAX
/PyLong_SHIFT
- 1)
3238 /* Extreme overflow */
3240 else if (diff
< 1 - PY_SSIZE_T_MAX
/PyLong_SHIFT
)
3241 /* Extreme underflow */
3242 goto underflow_or_zero
;
3243 /* Next line is now safe from overflowing a Py_ssize_t */
3244 diff
= diff
* PyLong_SHIFT
+ bits_in_digit(a
->ob_digit
[a_size
- 1]) -
3245 bits_in_digit(b
->ob_digit
[b_size
- 1]);
3246 /* Now diff = a_bits - b_bits. */
3247 if (diff
> DBL_MAX_EXP
)
3249 else if (diff
< DBL_MIN_EXP
- DBL_MANT_DIG
- 1)
3250 goto underflow_or_zero
;
3252 /* Choose value for shift; see comments for step 1 above. */
3253 shift
= MAX(diff
, DBL_MIN_EXP
) - DBL_MANT_DIG
- 2;
3257 /* x = abs(a * 2**-shift) */
3259 Py_ssize_t i
, shift_digits
= -shift
/ PyLong_SHIFT
;
3261 /* x = a << -shift */
3262 if (a_size
>= PY_SSIZE_T_MAX
- 1 - shift_digits
) {
3263 /* In practice, it's probably impossible to end up
3264 here. Both a and b would have to be enormous,
3265 using close to SIZE_T_MAX bytes of memory each. */
3266 PyErr_SetString(PyExc_OverflowError
,
3267 "intermediate overflow during division");
3270 x
= _PyLong_New(a_size
+ shift_digits
+ 1);
3273 for (i
= 0; i
< shift_digits
; i
++)
3275 rem
= v_lshift(x
->ob_digit
+ shift_digits
, a
->ob_digit
,
3276 a_size
, -shift
% PyLong_SHIFT
);
3277 x
->ob_digit
[a_size
+ shift_digits
] = rem
;
3280 Py_ssize_t shift_digits
= shift
/ PyLong_SHIFT
;
3282 /* x = a >> shift */
3283 assert(a_size
>= shift_digits
);
3284 x
= _PyLong_New(a_size
- shift_digits
);
3287 rem
= v_rshift(x
->ob_digit
, a
->ob_digit
+ shift_digits
,
3288 a_size
- shift_digits
, shift
% PyLong_SHIFT
);
3289 /* set inexact if any of the bits shifted out is nonzero */
3292 while (!inexact
&& shift_digits
> 0)
3293 if (a
->ob_digit
[--shift_digits
])
3297 x_size
= Py_SIZE(x
);
3299 /* x //= b. If the remainder is nonzero, set inexact. We own the only
3300 reference to x, so it's safe to modify it in-place. */
3302 digit rem
= inplace_divrem1(x
->ob_digit
, x
->ob_digit
, x_size
,
3309 PyLongObject
*div
, *rem
;
3310 div
= x_divrem(x
, b
, &rem
);
3319 x_size
= ABS(Py_SIZE(x
));
3320 assert(x_size
> 0); /* result of division is never zero */
3321 x_bits
= (x_size
-1)*PyLong_SHIFT
+bits_in_digit(x
->ob_digit
[x_size
-1]);
3323 /* The number of extra bits that have to be rounded away. */
3324 extra_bits
= MAX(x_bits
, DBL_MIN_EXP
- shift
) - DBL_MANT_DIG
;
3325 assert(extra_bits
== 2 || extra_bits
== 3);
3327 /* Round by directly modifying the low digit of x. */
3328 mask
= (digit
)1 << (extra_bits
- 1);
3329 low
= x
->ob_digit
[0] | inexact
;
3330 if (low
& mask
&& low
& (3*mask
-1))
3332 x
->ob_digit
[0] = low
& ~(mask
-1U);
3334 /* Convert x to a double dx; the conversion is exact. */
3335 dx
= x
->ob_digit
[--x_size
];
3337 dx
= dx
* PyLong_BASE
+ x
->ob_digit
[--x_size
];
3340 /* Check whether ldexp result will overflow a double. */
3341 if (shift
+ x_bits
>= DBL_MAX_EXP
&&
3342 (shift
+ x_bits
> DBL_MAX_EXP
|| dx
== ldexp(1.0, (int)x_bits
)))
3344 result
= ldexp(dx
, (int)shift
);
3349 return PyFloat_FromDouble(negate
? -result
: result
);
3354 return PyFloat_FromDouble(negate
? -0.0 : 0.0);
3357 PyErr_SetString(PyExc_OverflowError
,
3358 "integer division result too large for a float");
3366 long_mod(PyObject
*v
, PyObject
*w
)
3368 PyLongObject
*a
, *b
, *mod
;
3370 CONVERT_BINOP(v
, w
, &a
, &b
);
3372 if (l_divmod(a
, b
, NULL
, &mod
) < 0)
3376 return (PyObject
*)mod
;
3380 long_divmod(PyObject
*v
, PyObject
*w
)
3382 PyLongObject
*a
, *b
, *div
, *mod
;
3385 CONVERT_BINOP(v
, w
, &a
, &b
);
3387 if (l_divmod(a
, b
, &div
, &mod
) < 0) {
3394 PyTuple_SetItem(z
, 0, (PyObject
*) div
);
3395 PyTuple_SetItem(z
, 1, (PyObject
*) mod
);
3408 long_pow(PyObject
*v
, PyObject
*w
, PyObject
*x
)
3410 PyLongObject
*a
, *b
, *c
; /* a,b,c = v,w,x */
3411 int negativeOutput
= 0; /* if x<0 return negative output */
3413 PyLongObject
*z
= NULL
; /* accumulated result */
3414 Py_ssize_t i
, j
, k
; /* counters */
3415 PyLongObject
*temp
= NULL
;
3417 /* 5-ary values. If the exponent is large enough, table is
3418 * precomputed so that table[i] == a**i % c for i in range(32).
3420 PyLongObject
*table
[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3421 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3423 /* a, b, c = v, w, x */
3424 CONVERT_BINOP(v
, w
, &a
, &b
);
3425 if (PyLong_Check(x
)) {
3426 c
= (PyLongObject
*)x
;
3429 else if (PyInt_Check(x
)) {
3430 c
= (PyLongObject
*)PyLong_FromLong(PyInt_AS_LONG(x
));
3434 else if (x
== Py_None
)
3439 Py_INCREF(Py_NotImplemented
);
3440 return Py_NotImplemented
;
3443 if (Py_SIZE(b
) < 0) { /* if exponent is negative */
3445 PyErr_SetString(PyExc_TypeError
, "pow() 2nd argument "
3446 "cannot be negative when 3rd argument specified");
3450 /* else return a float. This works because we know
3451 that this calls float_pow() which converts its
3452 arguments to double. */
3455 return PyFloat_Type
.tp_as_number
->nb_power(v
, w
, x
);
3461 raise ValueError() */
3462 if (Py_SIZE(c
) == 0) {
3463 PyErr_SetString(PyExc_ValueError
,
3464 "pow() 3rd argument cannot be 0");
3469 negativeOutput = True
3470 modulus = -modulus */
3471 if (Py_SIZE(c
) < 0) {
3473 temp
= (PyLongObject
*)_PyLong_Copy(c
);
3479 c
->ob_size
= - c
->ob_size
;
3484 if ((Py_SIZE(c
) == 1) && (c
->ob_digit
[0] == 1)) {
3485 z
= (PyLongObject
*)PyLong_FromLong(0L);
3489 /* Reduce base by modulus in some cases:
3490 1. If base < 0. Forcing the base non-negative makes things easier.
3491 2. If base is obviously larger than the modulus. The "small
3492 exponent" case later can multiply directly by base repeatedly,
3493 while the "large exponent" case multiplies directly by base 31
3494 times. It can be unboundedly faster to multiply by
3495 base % modulus instead.
3496 We could _always_ do this reduction, but l_divmod() isn't cheap,
3497 so we only do it when it buys something. */
3498 if (Py_SIZE(a
) < 0 || Py_SIZE(a
) > Py_SIZE(c
)) {
3499 if (l_divmod(a
, c
, NULL
, &temp
) < 0)
3507 /* At this point a, b, and c are guaranteed non-negative UNLESS
3508 c is NULL, in which case a may be negative. */
3510 z
= (PyLongObject
*)PyLong_FromLong(1L);
3514 /* Perform a modular reduction, X = X % c, but leave X alone if c
3520 if (l_divmod(X, c, NULL, &temp) < 0) \
3528 /* Multiply two values, then reduce the result:
3529 result = X*Y % c. If c is NULL, skip the mod. */
3530 #define MULT(X, Y, result) \
3532 temp = (PyLongObject *)long_mul(X, Y); \
3535 Py_XDECREF(result); \
3541 if (Py_SIZE(b
) <= FIVEARY_CUTOFF
) {
3542 /* Left-to-right binary exponentiation (HAC Algorithm 14.79) */
3543 /* http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf */
3544 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3545 digit bi
= b
->ob_digit
[i
];
3547 for (j
= (digit
)1 << (PyLong_SHIFT
-1); j
!= 0; j
>>= 1) {
3555 /* Left-to-right 5-ary exponentiation (HAC Algorithm 14.82) */
3556 Py_INCREF(z
); /* still holds 1L */
3558 for (i
= 1; i
< 32; ++i
)
3559 MULT(table
[i
-1], a
, table
[i
]);
3561 for (i
= Py_SIZE(b
) - 1; i
>= 0; --i
) {
3562 const digit bi
= b
->ob_digit
[i
];
3564 for (j
= PyLong_SHIFT
- 5; j
>= 0; j
-= 5) {
3565 const int index
= (bi
>> j
) & 0x1f;
3566 for (k
= 0; k
< 5; ++k
)
3569 MULT(z
, table
[index
], z
);
3574 if (negativeOutput
&& (Py_SIZE(z
) != 0)) {
3575 temp
= (PyLongObject
*)long_sub(z
, c
);
3591 if (Py_SIZE(b
) > FIVEARY_CUTOFF
) {
3592 for (i
= 0; i
< 32; ++i
)
3593 Py_XDECREF(table
[i
]);
3599 return (PyObject
*)z
;
3603 long_invert(PyLongObject
*v
)
3605 /* Implement ~x as -(x+1) */
3608 w
= (PyLongObject
*)PyLong_FromLong(1L);
3611 x
= (PyLongObject
*) long_add(v
, w
);
3615 Py_SIZE(x
) = -(Py_SIZE(x
));
3616 return (PyObject
*)x
;
3620 long_neg(PyLongObject
*v
)
3623 if (v
->ob_size
== 0 && PyLong_CheckExact(v
)) {
3626 return (PyObject
*) v
;
3628 z
= (PyLongObject
*)_PyLong_Copy(v
);
3630 z
->ob_size
= -(v
->ob_size
);
3631 return (PyObject
*)z
;
3635 long_abs(PyLongObject
*v
)
3640 return long_long((PyObject
*)v
);
3644 long_nonzero(PyLongObject
*v
)
3646 return Py_SIZE(v
) != 0;
3650 long_rshift(PyLongObject
*v
, PyLongObject
*w
)
3652 PyLongObject
*a
, *b
;
3653 PyLongObject
*z
= NULL
;
3654 Py_ssize_t shiftby
, newsize
, wordshift
, loshift
, hishift
, i
, j
;
3655 digit lomask
, himask
;
3657 CONVERT_BINOP((PyObject
*)v
, (PyObject
*)w
, &a
, &b
);
3659 if (Py_SIZE(a
) < 0) {
3660 /* Right shifting negative numbers is harder */
3661 PyLongObject
*a1
, *a2
;
3662 a1
= (PyLongObject
*) long_invert(a
);
3665 a2
= (PyLongObject
*) long_rshift(a1
, b
);
3669 z
= (PyLongObject
*) long_invert(a2
);
3673 shiftby
= PyLong_AsSsize_t((PyObject
*)b
);
3674 if (shiftby
== -1L && PyErr_Occurred())
3677 PyErr_SetString(PyExc_ValueError
,
3678 "negative shift count");
3681 wordshift
= shiftby
/ PyLong_SHIFT
;
3682 newsize
= ABS(Py_SIZE(a
)) - wordshift
;
3687 return (PyObject
*)z
;
3689 loshift
= shiftby
% PyLong_SHIFT
;
3690 hishift
= PyLong_SHIFT
- loshift
;
3691 lomask
= ((digit
)1 << hishift
) - 1;
3692 himask
= PyLong_MASK
^ lomask
;
3693 z
= _PyLong_New(newsize
);
3697 Py_SIZE(z
) = -(Py_SIZE(z
));
3698 for (i
= 0, j
= wordshift
; i
< newsize
; i
++, j
++) {
3699 z
->ob_digit
[i
] = (a
->ob_digit
[j
] >> loshift
) & lomask
;
3701 z
->ob_digit
[i
] |= (a
->ob_digit
[j
+1] << hishift
) & himask
;
3703 z
= long_normalize(z
);
3708 return (PyObject
*) z
;
3713 long_lshift(PyObject
*v
, PyObject
*w
)
3715 /* This version due to Tim Peters */
3716 PyLongObject
*a
, *b
;
3717 PyLongObject
*z
= NULL
;
3718 Py_ssize_t shiftby
, oldsize
, newsize
, wordshift
, remshift
, i
, j
;
3721 CONVERT_BINOP(v
, w
, &a
, &b
);
3723 shiftby
= PyLong_AsSsize_t((PyObject
*)b
);
3724 if (shiftby
== -1L && PyErr_Occurred())
3727 PyErr_SetString(PyExc_ValueError
, "negative shift count");
3730 /* wordshift, remshift = divmod(shiftby, PyLong_SHIFT) */
3731 wordshift
= shiftby
/ PyLong_SHIFT
;
3732 remshift
= shiftby
- wordshift
* PyLong_SHIFT
;
3734 oldsize
= ABS(a
->ob_size
);
3735 newsize
= oldsize
+ wordshift
;
3738 z
= _PyLong_New(newsize
);
3742 z
->ob_size
= -(z
->ob_size
);
3743 for (i
= 0; i
< wordshift
; i
++)
3746 for (i
= wordshift
, j
= 0; j
< oldsize
; i
++, j
++) {
3747 accum
|= (twodigits
)a
->ob_digit
[j
] << remshift
;
3748 z
->ob_digit
[i
] = (digit
)(accum
& PyLong_MASK
);
3749 accum
>>= PyLong_SHIFT
;
3752 z
->ob_digit
[newsize
-1] = (digit
)accum
;
3755 z
= long_normalize(z
);
3759 return (PyObject
*) z
;
3762 /* Compute two's complement of digit vector a[0:m], writing result to
3763 z[0:m]. The digit vector a need not be normalized, but should not
3764 be entirely zero. a and z may point to the same digit vector. */
3767 v_complement(digit
*z
, digit
*a
, Py_ssize_t m
)
3771 for (i
= 0; i
< m
; ++i
) {
3772 carry
+= a
[i
] ^ PyLong_MASK
;
3773 z
[i
] = carry
& PyLong_MASK
;
3774 carry
>>= PyLong_SHIFT
;
3779 /* Bitwise and/xor/or operations */
3782 long_bitwise(PyLongObject
*a
,
3783 int op
, /* '&', '|', '^' */
3786 int nega
, negb
, negz
;
3787 Py_ssize_t size_a
, size_b
, size_z
, i
;
3790 /* Bitwise operations for negative numbers operate as though
3791 on a two's complement representation. So convert arguments
3792 from sign-magnitude to two's complement, and convert the
3793 result back to sign-magnitude at the end. */
3795 /* If a is negative, replace it by its two's complement. */
3796 size_a
= ABS(Py_SIZE(a
));
3797 nega
= Py_SIZE(a
) < 0;
3799 z
= _PyLong_New(size_a
);
3802 v_complement(z
->ob_digit
, a
->ob_digit
, size_a
);
3806 /* Keep reference count consistent. */
3810 size_b
= ABS(Py_SIZE(b
));
3811 negb
= Py_SIZE(b
) < 0;
3813 z
= _PyLong_New(size_b
);
3818 v_complement(z
->ob_digit
, b
->ob_digit
, size_b
);
3824 /* Swap a and b if necessary to ensure size_a >= size_b. */
3825 if (size_a
< size_b
) {
3826 z
= a
; a
= b
; b
= z
;
3827 size_z
= size_a
; size_a
= size_b
; size_b
= size_z
;
3828 negz
= nega
; nega
= negb
; negb
= negz
;
3831 /* JRH: The original logic here was to allocate the result value (z)
3832 as the longer of the two operands. However, there are some cases
3833 where the result is guaranteed to be shorter than that: AND of two
3834 positives, OR of two negatives: use the shorter number. AND with
3835 mixed signs: use the positive number. OR with mixed signs: use the
3845 size_z
= negb
? size_a
: size_b
;
3849 size_z
= negb
? size_b
: size_a
;
3852 PyErr_BadArgument();
3856 /* We allow an extra digit if z is negative, to make sure that
3857 the final two's complement of z doesn't overflow. */
3858 z
= _PyLong_New(size_z
+ negz
);
3865 /* Compute digits for overlap of a and b. */
3868 for (i
= 0; i
< size_b
; ++i
)
3869 z
->ob_digit
[i
] = a
->ob_digit
[i
] & b
->ob_digit
[i
];
3872 for (i
= 0; i
< size_b
; ++i
)
3873 z
->ob_digit
[i
] = a
->ob_digit
[i
] | b
->ob_digit
[i
];
3876 for (i
= 0; i
< size_b
; ++i
)
3877 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ b
->ob_digit
[i
];
3880 PyErr_BadArgument();
3884 /* Copy any remaining digits of a, inverting if necessary. */
3885 if (op
== '^' && negb
)
3886 for (; i
< size_z
; ++i
)
3887 z
->ob_digit
[i
] = a
->ob_digit
[i
] ^ PyLong_MASK
;
3888 else if (i
< size_z
)
3889 memcpy(&z
->ob_digit
[i
], &a
->ob_digit
[i
],
3890 (size_z
-i
)*sizeof(digit
));
3892 /* Complement result if negative. */
3894 Py_SIZE(z
) = -(Py_SIZE(z
));
3895 z
->ob_digit
[size_z
] = PyLong_MASK
;
3896 v_complement(z
->ob_digit
, z
->ob_digit
, size_z
+1);
3901 return (PyObject
*)long_normalize(z
);
3905 long_and(PyObject
*v
, PyObject
*w
)
3907 PyLongObject
*a
, *b
;
3909 CONVERT_BINOP(v
, w
, &a
, &b
);
3910 c
= long_bitwise(a
, '&', b
);
3917 long_xor(PyObject
*v
, PyObject
*w
)
3919 PyLongObject
*a
, *b
;
3921 CONVERT_BINOP(v
, w
, &a
, &b
);
3922 c
= long_bitwise(a
, '^', b
);
3929 long_or(PyObject
*v
, PyObject
*w
)
3931 PyLongObject
*a
, *b
;
3933 CONVERT_BINOP(v
, w
, &a
, &b
);
3934 c
= long_bitwise(a
, '|', b
);
3941 long_coerce(PyObject
**pv
, PyObject
**pw
)
3943 if (PyInt_Check(*pw
)) {
3944 *pw
= PyLong_FromLong(PyInt_AS_LONG(*pw
));
3950 else if (PyLong_Check(*pw
)) {
3955 return 1; /* Can't do it */
3959 long_long(PyObject
*v
)
3961 if (PyLong_CheckExact(v
))
3964 v
= _PyLong_Copy((PyLongObject
*)v
);
3969 long_int(PyObject
*v
)
3972 x
= PyLong_AsLong(v
);
3973 if (PyErr_Occurred()) {
3974 if (PyErr_ExceptionMatches(PyExc_OverflowError
)) {
3976 if (PyLong_CheckExact(v
)) {
3981 return _PyLong_Copy((PyLongObject
*)v
);
3986 return PyInt_FromLong(x
);
3990 long_float(PyObject
*v
)
3993 result
= PyLong_AsDouble(v
);
3994 if (result
== -1.0 && PyErr_Occurred())
3996 return PyFloat_FromDouble(result
);
4000 long_oct(PyObject
*v
)
4002 return _PyLong_Format(v
, 8, 1, 0);
4006 long_hex(PyObject
*v
)
4008 return _PyLong_Format(v
, 16, 1, 0);
4012 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
);
4015 long_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
4018 int base
= -909; /* unlikely! */
4019 static char *kwlist
[] = {"x", "base", 0};
4021 if (type
!= &PyLong_Type
)
4022 return long_subtype_new(type
, args
, kwds
); /* Wimp out */
4023 if (!PyArg_ParseTupleAndKeywords(args
, kwds
, "|Oi:long", kwlist
,
4028 PyErr_SetString(PyExc_TypeError
,
4029 "long() missing string argument");
4032 return PyLong_FromLong(0L);
4035 return PyNumber_Long(x
);
4036 else if (PyString_Check(x
)) {
4037 /* Since PyLong_FromString doesn't have a length parameter,
4038 * check here for possible NULs in the string. */
4039 char *string
= PyString_AS_STRING(x
);
4040 if (strlen(string
) != (size_t)PyString_Size(x
)) {
4041 /* create a repr() of the input string,
4042 * just like PyLong_FromString does. */
4044 srepr
= PyObject_Repr(x
);
4047 PyErr_Format(PyExc_ValueError
,
4048 "invalid literal for long() with base %d: %s",
4049 base
, PyString_AS_STRING(srepr
));
4053 return PyLong_FromString(PyString_AS_STRING(x
), NULL
, base
);
4055 #ifdef Py_USING_UNICODE
4056 else if (PyUnicode_Check(x
))
4057 return PyLong_FromUnicode(PyUnicode_AS_UNICODE(x
),
4058 PyUnicode_GET_SIZE(x
),
4062 PyErr_SetString(PyExc_TypeError
,
4063 "long() can't convert non-string with explicit base");
4068 /* Wimpy, slow approach to tp_new calls for subtypes of long:
4069 first create a regular long from whatever arguments we got,
4070 then allocate a subtype instance and initialize it from
4071 the regular long. The regular long is then thrown away.
4074 long_subtype_new(PyTypeObject
*type
, PyObject
*args
, PyObject
*kwds
)
4076 PyLongObject
*tmp
, *newobj
;
4079 assert(PyType_IsSubtype(type
, &PyLong_Type
));
4080 tmp
= (PyLongObject
*)long_new(&PyLong_Type
, args
, kwds
);
4083 assert(PyLong_CheckExact(tmp
));
4087 newobj
= (PyLongObject
*)type
->tp_alloc(type
, n
);
4088 if (newobj
== NULL
) {
4092 assert(PyLong_Check(newobj
));
4093 Py_SIZE(newobj
) = Py_SIZE(tmp
);
4094 for (i
= 0; i
< n
; i
++)
4095 newobj
->ob_digit
[i
] = tmp
->ob_digit
[i
];
4097 return (PyObject
*)newobj
;
4101 long_getnewargs(PyLongObject
*v
)
4103 return Py_BuildValue("(N)", _PyLong_Copy(v
));
4107 long_get0(PyLongObject
*v
, void *context
) {
4108 return PyLong_FromLong(0L);
4112 long_get1(PyLongObject
*v
, void *context
) {
4113 return PyLong_FromLong(1L);
4117 long__format__(PyObject
*self
, PyObject
*args
)
4119 PyObject
*format_spec
;
4121 if (!PyArg_ParseTuple(args
, "O:__format__", &format_spec
))
4123 if (PyBytes_Check(format_spec
))
4124 return _PyLong_FormatAdvanced(self
,
4125 PyBytes_AS_STRING(format_spec
),
4126 PyBytes_GET_SIZE(format_spec
));
4127 if (PyUnicode_Check(format_spec
)) {
4128 /* Convert format_spec to a str */
4130 PyObject
*str_spec
= PyObject_Str(format_spec
);
4132 if (str_spec
== NULL
)
4135 result
= _PyLong_FormatAdvanced(self
,
4136 PyBytes_AS_STRING(str_spec
),
4137 PyBytes_GET_SIZE(str_spec
));
4139 Py_DECREF(str_spec
);
4142 PyErr_SetString(PyExc_TypeError
, "__format__ requires str or unicode");
4147 long_sizeof(PyLongObject
*v
)
4151 res
= v
->ob_type
->tp_basicsize
+ ABS(Py_SIZE(v
))*sizeof(digit
);
4152 return PyInt_FromSsize_t(res
);
4156 long_bit_length(PyLongObject
*v
)
4158 PyLongObject
*result
, *x
, *y
;
4159 Py_ssize_t ndigits
, msd_bits
= 0;
4163 assert(PyLong_Check(v
));
4165 ndigits
= ABS(Py_SIZE(v
));
4167 return PyInt_FromLong(0);
4169 msd
= v
->ob_digit
[ndigits
-1];
4174 msd_bits
+= (long)(BitLengthTable
[msd
]);
4176 if (ndigits
<= PY_SSIZE_T_MAX
/PyLong_SHIFT
)
4177 return PyInt_FromSsize_t((ndigits
-1)*PyLong_SHIFT
+ msd_bits
);
4179 /* expression above may overflow; use Python integers instead */
4180 result
= (PyLongObject
*)PyLong_FromSsize_t(ndigits
- 1);
4183 x
= (PyLongObject
*)PyLong_FromLong(PyLong_SHIFT
);
4186 y
= (PyLongObject
*)long_mul(result
, x
);
4193 x
= (PyLongObject
*)PyLong_FromLong((long)msd_bits
);
4196 y
= (PyLongObject
*)long_add(result
, x
);
4203 return (PyObject
*)result
;
4210 PyDoc_STRVAR(long_bit_length_doc
,
4211 "long.bit_length() -> int or long\n\
4213 Number of bits necessary to represent self in binary.\n\
4216 >>> (37L).bit_length()\n\
4221 long_is_finite(PyObject
*v
)
4227 static PyMethodDef long_methods
[] = {
4228 {"conjugate", (PyCFunction
)long_long
, METH_NOARGS
,
4229 "Returns self, the complex conjugate of any long."},
4230 {"bit_length", (PyCFunction
)long_bit_length
, METH_NOARGS
,
4231 long_bit_length_doc
},
4233 {"is_finite", (PyCFunction
)long_is_finite
, METH_NOARGS
,
4234 "Returns always True."},
4236 {"__trunc__", (PyCFunction
)long_long
, METH_NOARGS
,
4237 "Truncating an Integral returns itself."},
4238 {"__getnewargs__", (PyCFunction
)long_getnewargs
, METH_NOARGS
},
4239 {"__format__", (PyCFunction
)long__format__
, METH_VARARGS
},
4240 {"__sizeof__", (PyCFunction
)long_sizeof
, METH_NOARGS
,
4241 "Returns size in memory, in bytes"},
4242 {NULL
, NULL
} /* sentinel */
4245 static PyGetSetDef long_getset
[] = {
4247 (getter
)long_long
, (setter
)NULL
,
4248 "the real part of a complex number",
4251 (getter
)long_get0
, (setter
)NULL
,
4252 "the imaginary part of a complex number",
4255 (getter
)long_long
, (setter
)NULL
,
4256 "the numerator of a rational number in lowest terms",
4259 (getter
)long_get1
, (setter
)NULL
,
4260 "the denominator of a rational number in lowest terms",
4262 {NULL
} /* Sentinel */
4265 PyDoc_STRVAR(long_doc
,
4266 "long(x=0) -> long\n\
4267 long(x, base=10) -> long\n\
4269 Convert a number or string to a long integer, or return 0L if no arguments\n\
4270 are given. If x is floating point, the conversion truncates towards zero.\n\
4272 If x is not a number or if base is given, then x must be a string or\n\
4273 Unicode object representing an integer literal in the given base. The\n\
4274 literal can be preceded by '+' or '-' and be surrounded by whitespace.\n\
4275 The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to\n\
4276 interpret the base from the string as an integer literal.\n\
4277 >>> int('0b100', base=0)\n\
4280 static PyNumberMethods long_as_number
= {
4281 (binaryfunc
)long_add
, /*nb_add*/
4282 (binaryfunc
)long_sub
, /*nb_subtract*/
4283 (binaryfunc
)long_mul
, /*nb_multiply*/
4284 long_classic_div
, /*nb_divide*/
4285 long_mod
, /*nb_remainder*/
4286 long_divmod
, /*nb_divmod*/
4287 long_pow
, /*nb_power*/
4288 (unaryfunc
)long_neg
, /*nb_negative*/
4289 (unaryfunc
)long_long
, /*tp_positive*/
4290 (unaryfunc
)long_abs
, /*tp_absolute*/
4291 (inquiry
)long_nonzero
, /*tp_nonzero*/
4292 (unaryfunc
)long_invert
, /*nb_invert*/
4293 long_lshift
, /*nb_lshift*/
4294 (binaryfunc
)long_rshift
, /*nb_rshift*/
4295 long_and
, /*nb_and*/
4296 long_xor
, /*nb_xor*/
4298 long_coerce
, /*nb_coerce*/
4299 long_int
, /*nb_int*/
4300 long_long
, /*nb_long*/
4301 long_float
, /*nb_float*/
4302 long_oct
, /*nb_oct*/
4303 long_hex
, /*nb_hex*/
4304 0, /* nb_inplace_add */
4305 0, /* nb_inplace_subtract */
4306 0, /* nb_inplace_multiply */
4307 0, /* nb_inplace_divide */
4308 0, /* nb_inplace_remainder */
4309 0, /* nb_inplace_power */
4310 0, /* nb_inplace_lshift */
4311 0, /* nb_inplace_rshift */
4312 0, /* nb_inplace_and */
4313 0, /* nb_inplace_xor */
4314 0, /* nb_inplace_or */
4315 long_div
, /* nb_floor_divide */
4316 long_true_divide
, /* nb_true_divide */
4317 0, /* nb_inplace_floor_divide */
4318 0, /* nb_inplace_true_divide */
4319 long_long
, /* nb_index */
4322 PyTypeObject PyLong_Type
= {
4323 PyObject_HEAD_INIT(&PyType_Type
)
4325 "long", /* tp_name */
4326 offsetof(PyLongObject
, ob_digit
), /* tp_basicsize */
4327 sizeof(digit
), /* tp_itemsize */
4328 long_dealloc
, /* tp_dealloc */
4332 (cmpfunc
)long_compare
, /* tp_compare */
4333 long_repr
, /* tp_repr */
4334 &long_as_number
, /* tp_as_number */
4335 0, /* tp_as_sequence */
4336 0, /* tp_as_mapping */
4337 (hashfunc
)long_hash
, /* tp_hash */
4339 long_str
, /* tp_str */
4340 PyObject_GenericGetAttr
, /* tp_getattro */
4341 0, /* tp_setattro */
4342 0, /* tp_as_buffer */
4343 Py_TPFLAGS_DEFAULT
| Py_TPFLAGS_CHECKTYPES
|
4344 Py_TPFLAGS_BASETYPE
| Py_TPFLAGS_LONG_SUBCLASS
, /* tp_flags */
4345 long_doc
, /* tp_doc */
4346 0, /* tp_traverse */
4348 0, /* tp_richcompare */
4349 0, /* tp_weaklistoffset */
4351 0, /* tp_iternext */
4352 long_methods
, /* tp_methods */
4354 long_getset
, /* tp_getset */
4357 0, /* tp_descr_get */
4358 0, /* tp_descr_set */
4359 0, /* tp_dictoffset */
4362 long_new
, /* tp_new */
4363 PyObject_Del
, /* tp_free */
4366 static PyTypeObject Long_InfoType
;
4368 PyDoc_STRVAR(long_info__doc__
,
4371 A struct sequence that holds information about Python's\n\
4372 internal representation of integers. The attributes are read only.");
4374 static PyStructSequence_Field long_info_fields
[] = {
4375 {"bits_per_digit", "size of a digit in bits"},
4376 {"sizeof_digit", "size in bytes of the C type used to represent a digit"},
4380 static PyStructSequence_Desc long_info_desc
= {
4381 "sys.long_info", /* name */
4382 long_info__doc__
, /* doc */
4383 long_info_fields
, /* fields */
4384 2 /* number of fields */
4388 PyLong_GetInfo(void)
4390 PyObject
* long_info
;
4392 long_info
= PyStructSequence_New(&Long_InfoType
);
4393 if (long_info
== NULL
)
4395 PyStructSequence_SET_ITEM(long_info
, field
++,
4396 PyInt_FromLong(PyLong_SHIFT
));
4397 PyStructSequence_SET_ITEM(long_info
, field
++,
4398 PyInt_FromLong(sizeof(digit
)));
4399 if (PyErr_Occurred()) {
4400 Py_CLEAR(long_info
);
4409 /* initialize long_info */
4410 if (Long_InfoType
.tp_name
== 0)
4411 PyStructSequence_InitType(&Long_InfoType
, &long_info_desc
);