Revision 27 1.10/trunk/binaries/mac/raster/gdal/GDAL.framework/Versions/1.7/Python/site-packages/numpy/core/setup_common.py
setup_common.py | ||
---|---|---|
1 | 1 |
# Code shared by distutils and scons builds |
2 |
import sys |
|
3 |
from os.path import join |
|
4 |
import warnings |
|
5 |
import copy |
|
6 |
import binascii |
|
2 | 7 |
|
8 |
from distutils.ccompiler import CompileError |
|
9 |
|
|
#-------------------
# Versioning support
#-------------------
# How to change C_API_VERSION ?
# - increase C_API_VERSION value
# - record the hash for the new C API with the script cversions.py
#   and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
# exception for released version.

# Binary compatibility version number. This number is increased whenever the
# C-API is changed such that binary compatibility is broken, i.e. whenever a
# recompile of extension modules is needed.
C_ABI_VERSION = 0x01000009

# Minor API version. This number is increased whenever a change is made to the
# C-API -- whether it breaks binary compatibility or not. Some changes, such
# as adding a function pointer to the end of the function table, can be made
# without breaking binary compatibility. In this case, only the C_API_VERSION
# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
C_API_VERSION = 0x00000004
|
class MismatchCAPIWarning(Warning):
    """Warning emitted when the C API checksum recorded in cversions.txt
    does not match the checksum of the current API definition, i.e. the
    C API version number was not bumped after an API change."""
    pass
|
36 |
|
|
def is_released(config):
    """Return True if a released version of numpy is detected.

    Parameters
    ----------
    config : distutils config object
        Must provide ``get_version(path)``.

    Raises
    ------
    ValueError
        If the version cannot be determined.
    """
    # Local import, mirroring the original function-scope import style.
    import re

    v = config.get_version('../version.py')
    if v is None:
        raise ValueError("Could not get version")
    # Split the version string into LooseVersion-style components (runs of
    # digits, runs of lowercase letters, with '.' separators dropped).
    # distutils.version.LooseVersion was used historically but is deprecated
    # and removed in Python 3.12; this reproduces its component count.
    # A released version is "major.minor.micro" (at most 3 components);
    # any suffix such as 'dev' or 'rc' produces extra components.
    components = [x for x in re.split(r'(\d+|[a-z]+|\.)', v) if x and x != '.']
    return len(components) <= 3
|
48 |
|
|
def get_api_versions(apiversion, codegen_dir):
    """Return current C API checksum and the recorded checksum for the given
    version of the C API.

    Parameters
    ----------
    apiversion : int
        C API version number to look up among the recorded hashes.
    codegen_dir : str
        Path to the code_generators directory; must contain the genapi and
        numpy_api modules.

    Returns
    -------
    (curapi_hash, recorded_hash) : tuple of str
    """
    # NOTE: the original also built an `api_files` list of the two
    # *_api_order.txt paths, but never used it; the dead local was removed.

    # Compute the hash of the current API as defined by the code generators;
    # make them importable for the duration of the call only.
    sys.path.insert(0, codegen_dir)
    try:
        m = __import__('genapi')
        numpy_api = __import__('numpy_api')
        curapi_hash = m.fullapi_hash(numpy_api.full_api)
        apis_hash = m.get_versions_hash()
    finally:
        del sys.path[0]

    return curapi_hash, apis_hash[apiversion]
|
67 |
|
|
def check_api_version(apiversion, codegen_dir):
    """Emits a MismatchCAPIWarning if the C API version needs updating.

    Parameters
    ----------
    apiversion : int
        C API version number to verify.
    codegen_dir : str
        Path to the code_generators directory.
    """
    curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)

    # If the hashes differ, it means that the api .txt files in
    # codegen_dir have been updated without the API version being
    # updated. Any modification in those .txt files should be reflected
    # in the api and eventually abi versions.
    # To compute the checksum of the current API, use the
    # code_generators/cversions.py script.
    if curapi_hash != api_hash:
        msg = "API mismatch detected, the C API version " \
              "numbers have to be updated. Current C api version is %d, " \
              "with checksum %s, but recorded checksum for C API version %d in " \
              "codegen_dir/cversions.txt is %s. If functions were added in the " \
              "C API, you have to update C_API_VERSION in %s."
        warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
                             __file__),
                      MismatchCAPIWarning)
|
3 | 87 |
# Mandatory functions: if not found, fail the build |
4 | 88 |
MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", |
5 | 89 |
"floor", "ceil", "sqrt", "log10", "log", "exp", "asin", |
... | ... | |
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
                     "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
                     "copysign", "nextafter"]

# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot",
                           "copysign"]
|
15 | 101 |
|
# C99 functions: float and long double versions
C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor",
             "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp",
             "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh",
             "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp',
             "exp2", "log2", "copysign", "nextafter"]

# Single ('f' suffix) and extended ('l' suffix) precision variants.
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]

# C99 complex types and the complex functions we probe for.
C99_COMPLEX_TYPES = ['complex double', 'complex float', 'complex long double']

C99_COMPLEX_FUNCS = ['creal', 'cimag', 'cabs', 'carg', 'cexp', 'csqrt', 'clog',
                     'ccos', 'csin', 'cpow']
|
116 |
|
|
def fname2def(name):
    """Return the HAVE_* preprocessor define name for a function."""
    return "HAVE_" + name.upper()
119 |
|
|
def sym2def(symbol):
    """Turn a symbol name into an uppercase define, dropping any spaces."""
    return symbol.replace(' ', '').upper()
|
123 |
|
|
def type2def(symbol):
    """Turn a type name into an uppercase define, spaces -> underscores."""
    return symbol.replace(' ', '_').upper()
|
127 |
|
|
# Code to detect long double representation taken from MPFR m4 macro
def check_long_double_representation(cmd):
    """Compile a small C probe and inspect its object file to identify the
    binary representation used for ``long double`` by the compiler.

    Parameters
    ----------
    cmd : distutils config command
        Used for its compiler machinery (_check_compiler/_compile/_clean).
    """
    cmd._check_compiler()
    probe_src = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}

    # _compile (rather than try_compile) is used because the object file
    # name is needed so its bytes can be dumped afterwards.
    src_file, obj_file = cmd._compile(probe_src, None, None, 'c')
    try:
        return long_double_representation(pyod(obj_file))
    finally:
        cmd._clean()
|
140 |
|
|
141 |
LONG_DOUBLE_REPRESENTATION_SRC = r""" |
|
142 |
/* "before" is 16 bytes to ensure there's no padding between it and "x". |
|
143 |
* We're not expecting any "long double" bigger than 16 bytes or with |
|
144 |
* alignment requirements stricter than 16 bytes. */ |
|
145 |
typedef %(type)s test_type; |
|
146 |
|
|
147 |
struct { |
|
148 |
char before[16]; |
|
149 |
test_type x; |
|
150 |
char after[8]; |
|
151 |
} foo = { |
|
152 |
{ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', |
|
153 |
'\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, |
|
154 |
-123456789.0, |
|
155 |
{ '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } |
|
156 |
}; |
|
157 |
""" |
|
158 |
|
|
def pyod(filename):
    """Python implementation of the od UNIX utility (od -b, more exactly).

    Parameters
    ----------
    filename: str
        name of the file to get the dump from.

    Returns
    -------
    out: seq
        list of lines of od output

    Note
    ----
    We only implement enough to get the necessary information for long double
    representation, this is not intended as a compatible replacement for od.
    """
    out = []

    # Open in *binary* mode: the input is a compiled object file, and text
    # mode would corrupt the bytes on platforms doing newline translation
    # (the original used 'r').
    fid = open(filename, 'rb')
    try:
        # bytearray yields integer byte values on both Python 2 and 3,
        # replacing the Python-2-only char/binascii/oct() round trip.
        data = bytearray(fid.read())
        for offset in range(0, len(data), 16):
            # od -b: 7-digit octal offset, then up to sixteen 3-digit
            # octal byte values per line.
            line = ['%07o' % offset]
            line.extend(['%03o' % byte for byte in data[offset:offset + 16]])
            out.append(" ".join(line))
        return out
    finally:
        fid.close()
|
188 |
|
|
# Reference byte sequences (3-digit octal strings, as printed by od -b) that
# bracket the long double value inside the probe object file.
_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
               '001', '043', '105', '147', '211', '253', '315', '357']
_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']

# Known byte dumps of -123456789.0 in the candidate long double formats.
_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
                       '031', '300', '000', '000']
_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
                       '031', '300', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
                      '000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]

def long_double_representation(lines):
    """Given a binary dump as given by GNU od -b, look for long double
    representation.

    Raises ValueError if no recognized representation is found.
    """
    # Slide a 32-byte window over the dump until it reads as
    # before_seq + content + after_seq, where content is the long double:
    #   - content is 12 bytes: 80 bits Intel representation
    #   - content is 16 bytes: 80 bits Intel (padded) or quad precision
    #   - content is 8 bytes: same as double (not implemented yet)
    window = [''] * 32
    candidate = None
    for line in lines:
        # od -b prefixes every output line with an offset word; skip it.
        for word in line.split()[1:]:
            window = window[1:] + [word]

            # When the tail of the window equals the trailing marker, the
            # window holds (part of) before_seq + value + after_seq.
            if window[-8:] != _AFTER_SEQ:
                continue
            candidate = list(window)
            if window[:12] == _BEFORE_SEQ[4:]:
                if window[12:-8] == _INTEL_EXTENDED_12B:
                    return 'INTEL_EXTENDED_12_BYTES_LE'
            elif window[:8] == _BEFORE_SEQ[8:]:
                if window[8:-8] == _INTEL_EXTENDED_16B:
                    return 'INTEL_EXTENDED_16_BYTES_LE'
                elif window[8:-8] == _IEEE_QUAD_PREC_BE:
                    return 'IEEE_QUAD_BE'
                elif window[8:-8] == _IEEE_QUAD_PREC_LE:
                    return 'IEEE_QUAD_LE'
            elif window[:16] == _BEFORE_SEQ:
                if window[16:-8] == _IEEE_DOUBLE_LE:
                    return 'IEEE_DOUBLE_LE'
                elif window[16:-8] == _IEEE_DOUBLE_BE:
                    return 'IEEE_DOUBLE_BE'

    if candidate is not None:
        raise ValueError("Unrecognized format (%s)" % candidate)
    else:
        # The trailing marker never showed up at all.
        raise ValueError("Could not lock sequences (%s)" % candidate)
Also available in: Unified diff