diff -r cabd257b1070 Lib/math.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/math.py Thu Mar 05 23:51:57 2015 +0100 @@ -0,0 +1,346 @@ +# The math module is always available. It provides access to the mathematical +# functions defined by the C standard. + +# Constants +e = 2.7182818284590452354 +pi = 3.14159265358979323846 +inf = float("inf") +try: + nan = float("nan") +except ValueError: + pass + + +def _count_set_bits(n): + count = 0 + while n != 0: + count += 1 + n &= n - 1 # clear least significant bit + return count + + +# Divide-and-conquer factorial algorithm +# +# Based on the formula and psuedo-code provided at: +# http://www.luschny.de/math/factorial/binarysplitfact.html +# +# Faster algorithms exist, but they're more complicated and depend on +# a fast prime factorization algorithm. +# +# Notes on the algorithm +# ---------------------- +# +# factorial(n) is written in the form 2**k * m, with m odd. k and m are +# computed separately, and then combined using a left shift. +# +# The function factorial_odd_part computes the odd part m (i.e., the greatest +# odd divisor) of factorial(n), using the formula: +# +# factorial_odd_part(n) = +# +# product_{i >= 0} product_{0 < j <= n / 2**i, j odd} j +# +# Example: factorial_odd_part(20) = +# +# (1) * +# (1) * +# (1 * 3 * 5) * +# (1 * 3 * 5 * 7 * 9) +# (1 * 3 * 5 * 7 * 9 * 11 * 13 * 15 * 17 * 19) +# +# Here i goes from large to small: the first term corresponds to i=4 (any +# larger i gives an empty product), and the last term corresponds to i=0. +# Each term can be computed from the last by multiplying by the extra odd +# numbers required: e.g., to get from the penultimate term to the last one, +# we multiply by (11 * 13 * 15 * 17 * 19). +# +# To see a hint of why this formula works, here are the same numbers as above +# but with the even parts (i.e., the appropriate powers of 2) included. For +# each subterm in the product for i, we multiply that subterm by 2**i: +# +# factorial(20) = +# +# (16) * +# (8) * +# (4 * 12 * 20) * +# (2 * 6 * 10 * 14 * 18) * +# (1 * 3 * 5 * 7 * 9 * 11 * 13 * 15 * 17 * 19) +# +# The factorial_partial_product function computes the product of all odd j in +# range(start, stop) for given start and stop. It's used to compute the +# partial products like (11 * 13 * 15 * 17 * 19) in the example above. It +# operates recursively, repeatedly splitting the range into two roughly equal +# pieces until the subranges are small enough to be computed using only C +# integer arithmetic. +# +# The two-valuation k (i.e., the exponent of the largest power of 2 dividing +# the factorial) is computed independently in the main math_factorial +# function. By standard results, its value is: +# +# two_valuation = n//2 + n//4 + n//8 + .... +# +# It can be shown (e.g., by complete induction on n) that two_valuation is +# equal to n - count_set_bits(n), where count_set_bits(n) gives the number of +# '1'-bits in the binary expansion of n. + + +def _factorial_partial_product(start, stop, max_bits): + """Compute product(range(start, stop, 2)) using divide and conquer. + Assumes start and stop are odd and stop > start. + max_bits must be >= (stop - 2).bit_length(). + """ + + # If the return value will fit an unsigned long, then we can multiply in a + # tight, fast loop where each multiply is O(1). Compute an upper bound on + # the number of bits required to store the answer. + # + # Storing some integer z requires floor(lg(z))+1 bits, which is + # conveniently the value returned by bit_length(z). 
The product x*y will + # require at most bit_length(x) + bit_length(y) bits to store, based on the + # idea that lg product = lg x + lg y. + # + # We know that stop - 2 is the largest number to be multiplied. From + # there, we have: + # bit_length(answer) <= num_operands * bit_length(stop - 2) + + num_operands = (stop - start) // 2 + + # The "num_operands <= 8 * SIZEOF_LONG" check guards against the + # unlikely case of an overflow in num_operands * max_bits. + + # FIXME: drop SIZEOF_LONG + SIZEOF_LONG = 8 + if num_operands <= 8 * SIZEOF_LONG and num_operands * max_bits <= 8 * SIZEOF_LONG: + total = start + for j in range(start + 2, stop, 2): + total *= j + return total + + # find midpoint of range(start, stop), rounded up to next odd number. + midpoint = (start + num_operands) | 1 + left = _factorial_partial_product(start, midpoint, + (midpoint - 2).bit_length()) + right = _factorial_partial_product(midpoint, stop, max_bits) + return left * right + + +def _factorial_odd_part(n): + """Compute the odd part of factorial(n).""" + + outer = inner = 1 + + upper = 3 + for i in range(n.bit_length() - 2, -1, -1): + v = n >> i + if v <= 2: + continue + lower = upper + + # (v + 1) | 1 = least odd integer strictly larger than n / 2**i + upper = (v + 1) | 1 + + # Here inner is the product of all odd integers j in the range (0, + # n/2**(i+1)]. The factorial_partial_product call below gives the + # product of all odd integers j in the range (n/2**(i+1), n/2**i]. + partial = _factorial_partial_product(lower, upper, + (upper-2).bit_length()) + inner *= partial + + # Now inner is the product of all odd integers j in the range (0, + # n/2**i], giving the inner product in the formula above. + outer *= inner + + return outer + + +# Lookup table for small factorial values + +_SmallFactorials = [ + 1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800, 39916800, + 479001600, 6227020800, 87178291200, 1307674368000, 20922789888000, + 355687428096000, 6402373705728000, 121645100408832000, 2432902008176640000 +] + + +def factorial(x): + """factorial(x) -> Integral + + Find x!. Raise a ValueError if x is negative or non-integral. + """ + + if isinstance(x, float): + dx = floor(x) + if not(isfinite(x) and x == dx): + raise ValueError("factorial() only accepts integral values") + x = int(dx) + if not isinstance(x, int): + raise TypeError("expect int, got %s" % type(x).__name__) + + if x < 0: + raise ValueError("factorial() not defined for negative values") + + # use lookup table if x is small + if x < len(_SmallFactorials): + return _SmallFactorials[x] + + # else express in the form odd_part * 2**two_valuation, and compute as + # odd_part << two_valuation. + odd_part = _factorial_odd_part(x) + two_valuation = x - _count_set_bits(x) + result = odd_part << two_valuation + return result + + +def degrees(x): + """Convert angle x from radians to degrees.""" + return x * 180.0 / pi + + +def radians(x): + """Convert angle x from degrees to radians.""" + return x * pi / 180.0 + + +# Precision summation function as msum() by Raymond Hettinger in +# , +# enhanced with the exact partials sum and roundoff from Mark +# Dickinson's post at . +# See those links for more details, proofs and other references. +# +# Note 1: IEEE 754R floating point semantics are assumed, +# but the current implementation does not re-establish special +# value semantics across iterations (i.e. handling -Inf + Inf). 
+# +# Note 2: No provision is made for intermediate overflow handling; +# therefore, sum([1e+308, 1e-308, 1e+308]) returns 1e+308 while +# sum([1e+308, 1e+308, 1e-308]) raises an OverflowError due to the +# overflow of the first partial sum. +# +# Note 3: The intermediate values lo, yr, and hi are declared volatile so +# aggressive compilers won't algebraically reduce lo to always be exactly 0.0. +# Also, the volatile declaration forces the values to be stored in memory as +# regular doubles instead of extended long precision (80-bit) values. This +# prevents double rounding because any addition or subtraction of two doubles +# can be resolved exactly into double-sized hi and lo values. As long as the +# hi value gets forced into a double before yr and lo are computed, the extra +# bits in downstream extended precision operations (x87 for example) will be +# exactly zero and therefore can be losslessly stored back into a double, +# thereby preventing double rounding. +# +# Note 4: A similar implementation is in Modules/cmathmodule.c. +# Be sure to update both when making changes. +# +# Note 5: The signature of math.fsum() differs from __builtin__.sum() +# because the start argument doesn't make sense in the context of +# accurate summation. Since the partials table is collapsed before +# returning a result, sum(seq2, start=sum(seq1)) may not equal the +# accurate result returned by sum(itertools.chain(seq1, seq2)). + + +# Full precision summation of a sequence of floats. + +# def msum(iterable): +# partials = [] # sorted, non-overlapping partial sums +# for x in iterable: +# i = 0 +# for y in partials: +# if abs(x) < abs(y): +# x, y = y, x +# hi = x + y +# lo = y - (hi - x) +# if lo: +# partials[i] = lo +# i += 1 +# x = hi +# partials[i:] = [x] +# return sum_exact(partials) +# +# Rounded x+y stored in hi with the roundoff stored in lo. Together hi+lo +# are exactly equal to x+y. The inner loop applies hi/lo summation to each +# partial so that the list of partial sums remains exact. +# +# Sum_exact() adds the partial sums exactly and correctly rounds the final +# result (using the round-half-to-even rule). The items in partials remain +# non-zero, non-special, non-overlapping and strictly increasing in +# magnitude, but possibly not all having the same sign. +# +# Depends on IEEE 754 arithmetic guarantees and half-even rounding. + + +def fsum(seq): + partials = [] + inf_sum = special_sum = 0.0 + + for x in seq: + xsave = x + i = 0 + for y in partials: + if abs(x) < abs(y): + x, y = y, x + hi = x + y + lo = y - (hi - x) + if lo != 0.0: + partials[i] = lo + i += 1 + x = hi + + del partials[i:] + + if x: + if not isfinite(x): + # a nonfinite x could arise either as a result of intermediate + # overflow, or as a result of a nan or inf in the summands + if isfinite(xsave): + raise OverflowError("intermediate overflow in fsum") + if isinf(xsave): + inf_sum += xsave + special_sum += xsave + + # reset partials + del partials[:] + else: + partials.append(x) + + if special_sum: + if isnan(inf_sum): + raise ValueError("-inf + inf in fsum") + else: + return special_sum + + if not partials: + return 0.0 + + hi = partials.pop() + # sum_exact(ps, hi) from the top, stop when the sum becomes inexact. + while partials: + x = hi + y = partials.pop() + assert abs(y) < abs(x), (y, x) + hi = x + y + lo = y - (hi - x) + if lo: + break + + # Make half-even rounding work across multiple partials. 
Needed so + # that sum([1e-16, 1, 1e16]) will round-up the last digit to two + # instead of down to zero (the 1e-16 makes the 1 slightly closer to + # two). With a potential 1 ULP rounding error fixed-up, math.fsum() + # can guarantee commutativity. + if (partials + and ((lo < 0.0 and partials[-1] < 0.0) + or (lo > 0.0 and partials[-1] > 0.0))): + y = lo * 2.0 + x = hi + y + yr = x - hi + if y == yr: + hi = x + + return hi + + +try: + from _math import * +except ImportError: + # In fact, math.py doesn't work without _math: floor(), isfinite(), isinf() + # and isnan() functions are needed by factorial() and fsum() + pass diff -r cabd257b1070 Lib/test/test_math.py --- a/Lib/test/test_math.py Thu Mar 05 14:04:03 2015 +0100 +++ b/Lib/test/test_math.py Thu Mar 05 23:51:57 2015 +0100 @@ -4,13 +4,23 @@ from test.support import run_unittest, verbose, requires_IEEE_754 from test import support import unittest -import math import os import platform import sys import struct import sysconfig +math = support.import_fresh_module('math', fresh=['_math']) +py_math = support.import_fresh_module('math', blocked=['_math']) + +# math.py requires a few symbols of _math: inject them manually, py_math +# is still useful to test the Python implementation of math functions +py_math.floor = math.floor +py_math.isfinite = math.isfinite +py_math.isinf = math.isinf +py_math.isnan = math.isnan + + eps = 1E-05 NAN = float('nan') INF = float('inf') @@ -175,7 +185,8 @@ def parse_testfile(fname): flags ) -class MathTests(unittest.TestCase): +class MathMixin: + mathmod = None def ftest(self, name, value, expected): if abs(value-expected) > eps: @@ -186,6 +197,138 @@ class MathTests(unittest.TestCase): self.fail('%s returned %r, expected %r' % (name, value, expected)) + def testDegrees(self): + self.assertRaises(TypeError, self.mathmod.degrees) + self.ftest('degrees(pi)', self.mathmod.degrees(self.mathmod.pi), 180.0) + self.ftest('degrees(pi/2)', self.mathmod.degrees(self.mathmod.pi/2), 90.0) + self.ftest('degrees(-pi/4)', self.mathmod.degrees(-self.mathmod.pi/4), -45.0) + + @requires_IEEE_754 + @unittest.skipIf(HAVE_DOUBLE_ROUNDING, + "fsum is not exact on machines with double rounding") + def testFsum(self): + # self.mathmod.fsum relies on exact rounding for correct operation. + # There's a known problem with IA32 floating-point that causes + # inexact rounding in some situations, and will cause the + # self.mathmod.fsum tests below to fail; see issue #2937. On non IEEE + # 754 platforms, and on IEEE 754 platforms that exhibit the + # problem described in issue #2937, we simply skip the whole + # test. + + # Python version of math.fsum, for comparison. Uses a different + # algorithm based on frexp, ldexp and integer arithmetic. + from sys import float_info + mant_dig = float_info.mant_dig + etiny = float_info.min_exp - mant_dig + + def msum(iterable): + """Full precision summation. Compute sum(iterable) without any + intermediate accumulation of error. Based on the 'lsum' function + at http://code.activestate.com/recipes/393090/ + + """ + tmant, texp = 0, 0 + for x in iterable: + # Use directly the math module, py_math doesn't have frexp() + # nor ldexp() + mant, exp = math.frexp(x) + mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig + if texp > exp: + tmant <<= texp-exp + texp = exp + else: + mant <<= exp-texp + tmant += mant + # Round tmant * 2**texp to a float. 
The original recipe + # used float(str(tmant)) * 2.0**texp for this, but that's + # a little unsafe because str -> float conversion can't be + # relied upon to do correct rounding on all platforms. + tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp) + if tail > 0: + h = 1 << (tail-1) + tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1) + texp += tail + return math.ldexp(tmant, texp) + + test_values = [ + ([], 0.0), + ([0.0], 0.0), + ([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100), + ([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0), + ([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0), + ([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0), + ([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0), + ([1./n for n in range(1, 1001)], + float.fromhex('0x1.df11f45f4e61ap+2')), + ([(-1.)**n/n for n in range(1, 1001)], + float.fromhex('-0x1.62a2af1bd3624p-1')), + ([1.7**(i+1)-1.7**i for i in range(1000)] + [-1.7**1000], -1.0), + ([1e16, 1., 1e-16], 10000000000000002.0), + ([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0), + # exercise code for resizing partials array + ([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] + + [-2.**1022], + float.fromhex('0x1.5555555555555p+970')), + ] + + for i, (vals, expected) in enumerate(test_values): + try: + actual = self.mathmod.fsum(vals) + except OverflowError: + self.fail("test %d failed: got OverflowError, expected %r " + "for self.mathmod.fsum(%.100r)" % (i, expected, vals)) + except ValueError: + self.fail("test %d failed: got ValueError, expected %r " + "for self.mathmod.fsum(%.100r)" % (i, expected, vals)) + self.assertEqual(actual, expected) + + from random import random, gauss, shuffle + for j in range(1000): + vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10 + s = 0 + for i in range(200): + v = gauss(0, random()) ** 7 - s + s += v + vals.append(v) + shuffle(vals) + + s = msum(vals) + self.assertEqual(msum(vals), self.mathmod.fsum(vals)) + + @requires_IEEE_754 + def test_fsum_inf(self): + inf = self.mathmod.inf + self.assertEqual(self.mathmod.fsum([inf, inf, inf]), inf) + self.assertEqual(self.mathmod.fsum([-inf, -inf, -inf]), -inf) + + self.assertRaises(ValueError, + self.mathmod.fsum, [-inf, inf]) + + def testFactorial(self): + self.assertEqual(self.mathmod.factorial(0), 1) + self.assertEqual(self.mathmod.factorial(0.0), 1) + total = 1 + for i in range(1, 1000): + total *= i + self.assertEqual(self.mathmod.factorial(i), total) + self.assertEqual(self.mathmod.factorial(float(i)), total) + self.assertEqual(self.mathmod.factorial(i), py_factorial(i)) + self.assertRaises(ValueError, self.mathmod.factorial, -1) + self.assertRaises(ValueError, self.mathmod.factorial, -1.0) + self.assertRaises(ValueError, self.mathmod.factorial, -10**100) + self.assertRaises(ValueError, self.mathmod.factorial, -1e100) + self.assertRaises(ValueError, self.mathmod.factorial, self.mathmod.pi) + + def testRadians(self): + self.assertRaises(TypeError, self.mathmod.radians) + self.ftest('radians(180)', self.mathmod.radians(180), self.mathmod.pi) + self.ftest('radians(90)', self.mathmod.radians(90), self.mathmod.pi/2) + self.ftest('radians(-45)', self.mathmod.radians(-45), -self.mathmod.pi/4) + + +class MathTests(MathMixin, unittest.TestCase): + mathmod = math + def testConstants(self): self.ftest('pi', math.pi, 3.1415926) self.ftest('e', math.e, 2.7182818) @@ -390,12 +533,6 @@ class MathTests(unittest.TestCase): self.assertEqual(math.cosh(NINF), INF) self.assertTrue(math.isnan(math.cosh(NAN))) - def testDegrees(self): - self.assertRaises(TypeError, 
math.degrees) - self.ftest('degrees(pi)', math.degrees(math.pi), 180.0) - self.ftest('degrees(pi/2)', math.degrees(math.pi/2), 90.0) - self.ftest('degrees(-pi/4)', math.degrees(-math.pi/4), -45.0) - def testExp(self): self.assertRaises(TypeError, math.exp) self.ftest('exp(-1)', math.exp(-1), 1/math.e) @@ -411,21 +548,6 @@ class MathTests(unittest.TestCase): self.ftest('fabs(0)', math.fabs(0), 0) self.ftest('fabs(1)', math.fabs(1), 1) - def testFactorial(self): - self.assertEqual(math.factorial(0), 1) - self.assertEqual(math.factorial(0.0), 1) - total = 1 - for i in range(1, 1000): - total *= i - self.assertEqual(math.factorial(i), total) - self.assertEqual(math.factorial(float(i)), total) - self.assertEqual(math.factorial(i), py_factorial(i)) - self.assertRaises(ValueError, math.factorial, -1) - self.assertRaises(ValueError, math.factorial, -1.0) - self.assertRaises(ValueError, math.factorial, -10**100) - self.assertRaises(ValueError, math.factorial, -1e100) - self.assertRaises(ValueError, math.factorial, math.pi) - # Other implementations may place different upper bounds. @support.cpython_only def testFactorialHugeInputs(self): @@ -504,97 +626,6 @@ class MathTests(unittest.TestCase): self.assertEqual(math.frexp(NINF)[0], NINF) self.assertTrue(math.isnan(math.frexp(NAN)[0])) - @requires_IEEE_754 - @unittest.skipIf(HAVE_DOUBLE_ROUNDING, - "fsum is not exact on machines with double rounding") - def testFsum(self): - # math.fsum relies on exact rounding for correct operation. - # There's a known problem with IA32 floating-point that causes - # inexact rounding in some situations, and will cause the - # math.fsum tests below to fail; see issue #2937. On non IEEE - # 754 platforms, and on IEEE 754 platforms that exhibit the - # problem described in issue #2937, we simply skip the whole - # test. - - # Python version of math.fsum, for comparison. Uses a - # different algorithm based on frexp, ldexp and integer - # arithmetic. - from sys import float_info - mant_dig = float_info.mant_dig - etiny = float_info.min_exp - mant_dig - - def msum(iterable): - """Full precision summation. Compute sum(iterable) without any - intermediate accumulation of error. Based on the 'lsum' function - at http://code.activestate.com/recipes/393090/ - - """ - tmant, texp = 0, 0 - for x in iterable: - mant, exp = math.frexp(x) - mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig - if texp > exp: - tmant <<= texp-exp - texp = exp - else: - mant <<= exp-texp - tmant += mant - # Round tmant * 2**texp to a float. The original recipe - # used float(str(tmant)) * 2.0**texp for this, but that's - # a little unsafe because str -> float conversion can't be - # relied upon to do correct rounding on all platforms. 
- tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp) - if tail > 0: - h = 1 << (tail-1) - tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1) - texp += tail - return math.ldexp(tmant, texp) - - test_values = [ - ([], 0.0), - ([0.0], 0.0), - ([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100), - ([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0), - ([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0), - ([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0), - ([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0), - ([1./n for n in range(1, 1001)], - float.fromhex('0x1.df11f45f4e61ap+2')), - ([(-1.)**n/n for n in range(1, 1001)], - float.fromhex('-0x1.62a2af1bd3624p-1')), - ([1.7**(i+1)-1.7**i for i in range(1000)] + [-1.7**1000], -1.0), - ([1e16, 1., 1e-16], 10000000000000002.0), - ([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0), - # exercise code for resizing partials array - ([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] + - [-2.**1022], - float.fromhex('0x1.5555555555555p+970')), - ] - - for i, (vals, expected) in enumerate(test_values): - try: - actual = math.fsum(vals) - except OverflowError: - self.fail("test %d failed: got OverflowError, expected %r " - "for math.fsum(%.100r)" % (i, expected, vals)) - except ValueError: - self.fail("test %d failed: got ValueError, expected %r " - "for math.fsum(%.100r)" % (i, expected, vals)) - self.assertEqual(actual, expected) - - from random import random, gauss, shuffle - for j in range(1000): - vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10 - s = 0 - for i in range(200): - v = gauss(0, random()) ** 7 - s - s += v - vals.append(v) - shuffle(vals) - - s = msum(vals) - self.assertEqual(msum(vals), math.fsum(vals)) - def testHypot(self): self.assertRaises(TypeError, math.hypot) self.ftest('hypot(0,0)', math.hypot(0,0), 0) @@ -866,12 +897,6 @@ class MathTests(unittest.TestCase): #self.assertEqual(1.**NINF, 1) #self.assertEqual(1.**0, 1) - def testRadians(self): - self.assertRaises(TypeError, math.radians) - self.ftest('radians(180)', math.radians(180), math.pi) - self.ftest('radians(90)', math.radians(90), math.pi/2) - self.ftest('radians(-45)', math.radians(-45), -math.pi/4) - def testSin(self): self.assertRaises(TypeError, math.sin) self.ftest('sin(0)', math.sin(0), 0) @@ -1115,10 +1140,15 @@ class MathTests(unittest.TestCase): '\n '.join(failures)) +class PyMathTests(MathMixin, unittest.TestCase): + mathmod = py_math + + def test_main(): from doctest import DocFileSuite suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(MathTests)) + suite.addTest(unittest.makeSuite(PyMathTests)) suite.addTest(DocFileSuite("ieee754.txt")) run_unittest(suite) diff -r cabd257b1070 Modules/mathmodule.c --- a/Modules/mathmodule.c Thu Mar 05 14:04:03 2015 +0100 +++ b/Modules/mathmodule.c Thu Mar 05 23:51:57 2015 +0100 @@ -2017,7 +2017,7 @@ PyDoc_STRVAR(module_doc, static struct PyModuleDef mathmodule = { PyModuleDef_HEAD_INIT, - "math", + "_math", module_doc, -1, math_methods, @@ -2028,7 +2028,7 @@ static struct PyModuleDef mathmodule = { }; PyMODINIT_FUNC -PyInit_math(void) +PyInit__math(void) { PyObject *m; diff -r cabd257b1070 setup.py --- a/setup.py Thu Mar 05 14:04:03 2015 +0100 +++ b/setup.py Thu Mar 05 23:51:57 2015 +0100 @@ -587,7 +587,7 @@ class PyBuildExt(build_ext): depends=['_math.h'], libraries=math_libs) ) # math library functions, e.g. sin() - exts.append( Extension('math', ['mathmodule.c', '_math.c'], + exts.append( Extension('_math', ['mathmodule.c', '_math.c'], depends=['_math.h'], libraries=math_libs) )
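
A quick way to exercise the new pure-Python code next to the C accelerator is to mirror the import_fresh_module() dance in test_math.py. This is a minimal sketch, assuming the patch is applied (so that "math" is Lib/math.py and "_math" is the renamed C module); it is an illustration, not part of the patch:

    from test import support

    c_math = support.import_fresh_module('math', fresh=['_math'])
    py_math = support.import_fresh_module('math', blocked=['_math'])

    # math.py has no pure-Python fallback for these four primitives, so they
    # have to be injected by hand, exactly as test_math.py does.
    for name in ('floor', 'isfinite', 'isinf', 'isnan'):
        setattr(py_math, name, getattr(c_math, name))

    print(py_math.factorial(10))      # 3628800
    print(py_math.fsum([0.1] * 10))   # 1.0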
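
The comments above factorial() rest on two identities: the exponent of 2 in n! is n//2 + n//4 + n//8 + ... (Legendre's formula), and that sum equals n minus the number of set bits of n. The following standalone sketch spot-checks both identities against the existing C math.factorial(); the popcount and odd_part helpers are ad-hoc names used only for this check, not names from the patch:

    import math

    def popcount(n):
        # Same quantity as _count_set_bits() in the patch.
        return bin(n).count("1")

    def odd_part(n):
        # Greatest odd divisor of n!, computed naively for checking only.
        f = math.factorial(n)
        while f % 2 == 0:
            f //= 2
        return f

    for n in range(200):
        two_valuation = n - popcount(n)
        # Legendre's formula: exponent of 2 in n! is n//2 + n//4 + n//8 + ...
        assert two_valuation == sum(n >> k for k in range(1, n.bit_length() + 1))
        # factorial(n) == odd part shifted left by the two-valuation.
        assert odd_part(n) << two_valuation == math.factorial(n)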
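
fsum()'s inner loop relies on the "fast two-sum" step -- hi = x + y; lo = y - (hi - x) -- being exact whenever abs(y) <= abs(x), which is what keeps the partials list a lossless representation of the running sum. A spot-check of that claim using exact Fraction arithmetic, under the same assumptions the tests make (IEEE 754 doubles, round-half-to-even, no x87-style double rounding):

    import random
    from fractions import Fraction

    random.seed(12345)
    for _ in range(10000):
        x = random.uniform(-1e6, 1e6)
        y = random.uniform(-1.0, 1.0)
        if abs(x) < abs(y):
            x, y = y, x
        hi = x + y
        lo = y - (hi - x)
        # hi is the rounded sum, lo the exact roundoff: together they are x + y.
        assert Fraction(hi) + Fraction(lo) == Fraction(x) + Fraction(y)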
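
The closing block of fsum() exists precisely for the sum([1e-16, 1, 1e16]) case mentioned in its comment: the leftover 1e-16 partial has to push the final result up to the next representable double instead of being discarded. The expected value 10000000000000002.0 is the same one the test_values table in testFsum checks, and it holds for both the C and the pure-Python implementation:

    import math

    # Naive left-to-right addition loses the small terms to rounding...
    assert sum([1e-16, 1.0, 1e16]) == 10000000000000000.0
    # ...while fsum() keeps them as exact partials and rounds the total correctly.
    assert math.fsum([1e-16, 1.0, 1e16]) == 10000000000000002.0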