_internal.py 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874
  1. """
  2. A place for internal code
  3. Some things are more easily handled Python.
  4. """
  5. import ast
  6. import re
  7. import sys
  8. import platform
  9. from .multiarray import dtype, array, ndarray
  10. try:
  11. import ctypes
  12. except ImportError:
  13. ctypes = None
  14. IS_PYPY = platform.python_implementation() == 'PyPy'
  15. if (sys.byteorder == 'little'):
  16. _nbo = '<'
  17. else:
  18. _nbo = '>'
  19. def _makenames_list(adict, align):
  20. allfields = []
  21. fnames = list(adict.keys())
  22. for fname in fnames:
  23. obj = adict[fname]
  24. n = len(obj)
  25. if not isinstance(obj, tuple) or n not in [2, 3]:
  26. raise ValueError("entry not a 2- or 3- tuple")
  27. if (n > 2) and (obj[2] == fname):
  28. continue
  29. num = int(obj[1])
  30. if (num < 0):
  31. raise ValueError("invalid offset.")
  32. format = dtype(obj[0], align=align)
  33. if (n > 2):
  34. title = obj[2]
  35. else:
  36. title = None
  37. allfields.append((fname, format, num, title))
  38. # sort by offsets
  39. allfields.sort(key=lambda x: x[2])
  40. names = [x[0] for x in allfields]
  41. formats = [x[1] for x in allfields]
  42. offsets = [x[2] for x in allfields]
  43. titles = [x[3] for x in allfields]
  44. return names, formats, offsets, titles
  45. # Called in PyArray_DescrConverter function when
  46. # a dictionary without "names" and "formats"
  47. # fields is used as a data-type descriptor.
  48. def _usefields(adict, align):
  49. try:
  50. names = adict[-1]
  51. except KeyError:
  52. names = None
  53. if names is None:
  54. names, formats, offsets, titles = _makenames_list(adict, align)
  55. else:
  56. formats = []
  57. offsets = []
  58. titles = []
  59. for name in names:
  60. res = adict[name]
  61. formats.append(res[0])
  62. offsets.append(res[1])
  63. if (len(res) > 2):
  64. titles.append(res[2])
  65. else:
  66. titles.append(None)
  67. return dtype({"names": names,
  68. "formats": formats,
  69. "offsets": offsets,
  70. "titles": titles}, align)
  71. # construct an array_protocol descriptor list
  72. # from the fields attribute of a descriptor
  73. # This calls itself recursively but should eventually hit
  74. # a descriptor that has no fields and then return
  75. # a simple typestring
  76. def _array_descr(descriptor):
  77. fields = descriptor.fields
  78. if fields is None:
  79. subdtype = descriptor.subdtype
  80. if subdtype is None:
  81. if descriptor.metadata is None:
  82. return descriptor.str
  83. else:
  84. new = descriptor.metadata.copy()
  85. if new:
  86. return (descriptor.str, new)
  87. else:
  88. return descriptor.str
  89. else:
  90. return (_array_descr(subdtype[0]), subdtype[1])
  91. names = descriptor.names
  92. ordered_fields = [fields[x] + (x,) for x in names]
  93. result = []
  94. offset = 0
  95. for field in ordered_fields:
  96. if field[1] > offset:
  97. num = field[1] - offset
  98. result.append(('', '|V%d' % num))
  99. offset += num
  100. elif field[1] < offset:
  101. raise ValueError(
  102. "dtype.descr is not defined for types with overlapping or "
  103. "out-of-order fields")
  104. if len(field) > 3:
  105. name = (field[2], field[3])
  106. else:
  107. name = field[2]
  108. if field[0].subdtype:
  109. tup = (name, _array_descr(field[0].subdtype[0]),
  110. field[0].subdtype[1])
  111. else:
  112. tup = (name, _array_descr(field[0]))
  113. offset += field[0].itemsize
  114. result.append(tup)
  115. if descriptor.itemsize > offset:
  116. num = descriptor.itemsize - offset
  117. result.append(('', '|V%d' % num))
  118. return result
  119. # Build a new array from the information in a pickle.
  120. # Note that the name numpy.core._internal._reconstruct is embedded in
  121. # pickles of ndarrays made with NumPy before release 1.0
  122. # so don't remove the name here, or you'll
  123. # break backward compatibility.
  124. def _reconstruct(subtype, shape, dtype):
  125. return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
# Matches one item of a comma-separated format string: an optional byte
# order, an optional repeat count (possibly a parenthesized shape), an
# optional second byte order, and the type code (optionally followed by
# bracketed metadata such as a datetime unit).
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
# Separator between items, and trailing whitespace at the end of the string.
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

# '=' means native byte order; map it to this platform's character.
_convorder = {'=': _nbo}
def _commastring(astr):
    """
    Parse a comma-separated format string into a list of items.

    Each item is either a typestring (e.g. ``'i4'``) or a
    ``(typestring, repeats)`` tuple when a repeat count/shape prefix was
    given.  Byte-order characters equivalent to native order are dropped.

    Raises ValueError for unrecognized items or inconsistent byte orders.
    """
    startindex = 0
    result = []
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None (no match) or has unexpected shape
            raise ValueError('format number %d of "%s" is not recognized' %
                             (len(result)+1, astr))
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                # only trailing whitespace remains; we are done
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()

        # Reconcile the (up to two) byte-order characters around the repeat
        # count; when both are present they must agree after normalizing '='.
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        # Orders equivalent to native order carry no information; drop them.
        if order in ['|', '=', _nbo]:
            order = ''
        dtype = order + dtype
        if (repeats == ''):
            newitem = dtype
        else:
            # repeats is a literal int or tuple, e.g. '3' or '(2,3)'
            newitem = (dtype, ast.literal_eval(repeats))
        result.append(newitem)
    return result
  178. class dummy_ctype:
  179. def __init__(self, cls):
  180. self._cls = cls
  181. def __mul__(self, other):
  182. return self
  183. def __call__(self, *other):
  184. return self._cls(other)
  185. def __eq__(self, other):
  186. return self._cls == other._cls
  187. def __ne__(self, other):
  188. return self._cls != other._cls
  189. def _getintp_ctype():
  190. val = _getintp_ctype.cache
  191. if val is not None:
  192. return val
  193. if ctypes is None:
  194. import numpy as np
  195. val = dummy_ctype(np.intp)
  196. else:
  197. char = dtype('p').char
  198. if (char == 'i'):
  199. val = ctypes.c_int
  200. elif char == 'l':
  201. val = ctypes.c_long
  202. elif char == 'q':
  203. val = ctypes.c_longlong
  204. else:
  205. val = ctypes.c_long
  206. _getintp_ctype.cache = val
  207. return val
  208. _getintp_ctype.cache = None
  209. # Used for .ctypes attribute of ndarray
  210. class _missing_ctypes:
  211. def cast(self, num, obj):
  212. return num.value
  213. class c_void_p:
  214. def __init__(self, ptr):
  215. self.value = ptr
class _ctypes:
    """
    Backing object for ``ndarray.ctypes``: exposes the array's data pointer,
    shape and strides in ctypes-compatible form.  When the ctypes module is
    unavailable, _missing_ctypes provides a best-effort substitute.
    """

    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
        pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then let
        # it hold the array reference. This is a workaround to circumvent the
        # CPython bug https://bugs.python.org/issue12836
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in correct
        byte-order. The memory area may not even be writeable. The array
        flags and data-type of this array should be respected when passing this
        attribute to arbitrary C-code to avoid trouble that can include Python
        crashing. User Beware! The value of this attribute is exactly the same
        as ``self._array_interface_['data'][0]``.

        Note that unlike ``data_as``, a reference will not be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
        `ctypes.c_longlong` depending on the platform.
        The c_intp type is defined accordingly in `numpy.ctypeslib`.
        The ctypes array contains the shape of the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes array
        contains the strides information from the underlying array. This strides
        information is important for showing how many bytes must be jumped to
        get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # kept for compatibility
    get_data = data.fget
    get_shape = shape.fget
    get_strides = strides.fget
    get_as_parameter = _as_parameter_.fget
  313. def _newnames(datatype, order):
  314. """
  315. Given a datatype and an order object, return a new names tuple, with the
  316. order indicated
  317. """
  318. oldnames = datatype.names
  319. nameslist = list(oldnames)
  320. if isinstance(order, str):
  321. order = [order]
  322. seen = set()
  323. if isinstance(order, (list, tuple)):
  324. for name in order:
  325. try:
  326. nameslist.remove(name)
  327. except ValueError:
  328. if name in seen:
  329. raise ValueError("duplicate field name: %s" % (name,))
  330. else:
  331. raise ValueError("unknown field name: %s" % (name,))
  332. seen.add(name)
  333. return tuple(list(order) + nameslist)
  334. raise ValueError("unsupported order value: %s" % (order,))
  335. def _copy_fields(ary):
  336. """Return copy of structured array with padding between fields removed.
  337. Parameters
  338. ----------
  339. ary : ndarray
  340. Structured array from which to remove padding bytes
  341. Returns
  342. -------
  343. ary_copy : ndarray
  344. Copy of ary with padding bytes removed
  345. """
  346. dt = ary.dtype
  347. copy_dtype = {'names': dt.names,
  348. 'formats': [dt.fields[name][0] for name in dt.names]}
  349. return array(ary, dtype=copy_dtype, copy=True)
  350. def _getfield_is_safe(oldtype, newtype, offset):
  351. """ Checks safety of getfield for object arrays.
  352. As in _view_is_safe, we need to check that memory containing objects is not
  353. reinterpreted as a non-object datatype and vice versa.
  354. Parameters
  355. ----------
  356. oldtype : data-type
  357. Data type of the original ndarray.
  358. newtype : data-type
  359. Data type of the field being accessed by ndarray.getfield
  360. offset : int
  361. Offset of the field being accessed by ndarray.getfield
  362. Raises
  363. ------
  364. TypeError
  365. If the field access is invalid
  366. """
  367. if newtype.hasobject or oldtype.hasobject:
  368. if offset == 0 and newtype == oldtype:
  369. return
  370. if oldtype.names is not None:
  371. for name in oldtype.names:
  372. if (oldtype.fields[name][1] == offset and
  373. oldtype.fields[name][0] == newtype):
  374. return
  375. raise TypeError("Cannot get/set field of an object array")
  376. return
  377. def _view_is_safe(oldtype, newtype):
  378. """ Checks safety of a view involving object arrays, for example when
  379. doing::
  380. np.zeros(10, dtype=oldtype).view(newtype)
  381. Parameters
  382. ----------
  383. oldtype : data-type
  384. Data type of original ndarray
  385. newtype : data-type
  386. Data type of the view
  387. Raises
  388. ------
  389. TypeError
  390. If the new type is incompatible with the old type.
  391. """
  392. # if the types are equivalent, there is no problem.
  393. # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
  394. if oldtype == newtype:
  395. return
  396. if newtype.hasobject or oldtype.hasobject:
  397. raise TypeError("Cannot change data-type for object array.")
  398. return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

# PEP 3118 format character -> NumPy typestring, when native sizes are in
# effect (byte order '@' or '^').
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# Same mapping for the standard (fixed) sizes used with '<', '>', '=', '!'.
# Note: no 'g'/'Zg' here, long double has no standard size.
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# Format characters that have no NumPy equivalent; used to raise a clear
# NotImplementedError instead of a generic parse failure.
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}
  457. class _Stream:
  458. def __init__(self, s):
  459. self.s = s
  460. self.byteorder = '@'
  461. def advance(self, n):
  462. res = self.s[:n]
  463. self.s = self.s[n:]
  464. return res
  465. def consume(self, c):
  466. if self.s[:len(c)] == c:
  467. self.advance(len(c))
  468. return True
  469. return False
  470. def consume_until(self, c):
  471. if callable(c):
  472. i = 0
  473. while i < len(self.s) and not c(self.s[i]):
  474. i = i + 1
  475. return self.advance(i)
  476. else:
  477. i = self.s.index(c)
  478. res = self.advance(i)
  479. self.advance(len(c))
  480. return res
  481. @property
  482. def next(self):
  483. return self.s[0]
  484. def __bool__(self):
  485. return bool(self.s)
  486. def _dtype_from_pep3118(spec):
  487. stream = _Stream(spec)
  488. dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
  489. return dtype
def __dtype_from_pep3118(stream, is_subdtype):
    """
    Recursive worker for _dtype_from_pep3118.

    Consumes items from `stream` until it is exhausted or a '}' closes the
    current ``T{...}`` struct, accumulating fields into a dict-form dtype
    spec.  Returns ``(dtype, common_alignment)``.  A top-level spec that is
    a single unnamed item is unwrapped to a plain (non-structured) dtype.
    """
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                # network order is big-endian
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            # nested struct; recurse until the matching '}'
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                # 'Z' prefixes complex types and forms a two-char code
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # for strings/void, the repeat count is the item length,
                # not a sub-array count
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        # unnamed padding items are dropped entirely; everything else
        # becomes a field
        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
                                   % name)
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
  612. def _fix_names(field_spec):
  613. """ Replace names which are None with the next unused f%d name """
  614. names = field_spec['names']
  615. for i, name in enumerate(names):
  616. if name is not None:
  617. continue
  618. j = 0
  619. while True:
  620. name = 'f{}'.format(j)
  621. if name not in names:
  622. break
  623. j = j + 1
  624. names[i] = name
  625. def _add_trailing_padding(value, padding):
  626. """Inject the specified number of padding bytes at the end of a dtype"""
  627. if value.fields is None:
  628. field_spec = dict(
  629. names=['f0'],
  630. formats=[value],
  631. offsets=[0],
  632. itemsize=value.itemsize
  633. )
  634. else:
  635. fields = value.fields
  636. names = value.names
  637. field_spec = dict(
  638. names=names,
  639. formats=[fields[name][0] for name in names],
  640. offsets=[fields[name][1] for name in names],
  641. itemsize=value.itemsize
  642. )
  643. field_spec['itemsize'] += padding
  644. return dtype(field_spec)
  645. def _prod(a):
  646. p = 1
  647. for x in a:
  648. p *= x
  649. return p
  650. def _gcd(a, b):
  651. """Calculate the greatest common divisor of a and b"""
  652. while b:
  653. a, b = b, a % b
  654. return a
  655. def _lcm(a, b):
  656. return a // _gcd(a, b) * b
  657. def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
  658. """ Format the error message for when __array_ufunc__ gives up. """
  659. args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
  660. ['{}={!r}'.format(k, v)
  661. for k, v in kwargs.items()])
  662. args = inputs + kwargs.get('out', ())
  663. types_string = ', '.join(repr(type(arg).__name__) for arg in args)
  664. return ('operand type(s) all returned NotImplemented from '
  665. '__array_ufunc__({!r}, {!r}, {}): {}'
  666. .format(ufunc, method, args_string, types_string))
  667. def array_function_errmsg_formatter(public_api, types):
  668. """ Format the error message for when __array_ufunc__ gives up. """
  669. func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
  670. return ("no implementation found for '{}' on types that implement "
  671. '__array_function__: {}'.format(func_name, list(types)))
  672. def _ufunc_doc_signature_formatter(ufunc):
  673. """
  674. Builds a signature string which resembles PEP 457
  675. This is used to construct the first line of the docstring
  676. """
  677. # input arguments are simple
  678. if ufunc.nin == 1:
  679. in_args = 'x'
  680. else:
  681. in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin))
  682. # output arguments are both keyword or positional
  683. if ufunc.nout == 0:
  684. out_args = ', /, out=()'
  685. elif ufunc.nout == 1:
  686. out_args = ', /, out=None'
  687. else:
  688. out_args = '[, {positional}], / [, out={default}]'.format(
  689. positional=', '.join(
  690. 'out{}'.format(i+1) for i in range(ufunc.nout)),
  691. default=repr((None,)*ufunc.nout)
  692. )
  693. # keyword only args depend on whether this is a gufunc
  694. kwargs = (
  695. ", casting='same_kind'"
  696. ", order='K'"
  697. ", dtype=None"
  698. ", subok=True"
  699. "[, signature"
  700. ", extobj]"
  701. )
  702. if ufunc.signature is None:
  703. kwargs = ", where=True" + kwargs
  704. # join all the parts together
  705. return '{name}({in_args}{out_args}, *{kwargs})'.format(
  706. name=ufunc.__name__,
  707. in_args=in_args,
  708. out_args=out_args,
  709. kwargs=kwargs
  710. )
  711. def npy_ctypes_check(cls):
  712. # determine if a class comes from ctypes, in order to work around
  713. # a bug in the buffer protocol for those objects, bpo-10746
  714. try:
  715. # ctypes class are new-style, so have an __mro__. This probably fails
  716. # for ctypes classes with multiple inheritance.
  717. if IS_PYPY:
  718. # (..., _ctypes.basics._CData, Bufferable, object)
  719. ctype_base = cls.__mro__[-3]
  720. else:
  721. # # (..., _ctypes._CData, object)
  722. ctype_base = cls.__mro__[-2]
  723. # right now, they're part of the _ctypes module
  724. return '_ctypes' in ctype_base.__module__
  725. except Exception:
  726. return False
  727. class recursive:
  728. '''
  729. A decorator class for recursive nested functions.
  730. Naive recursive nested functions hold a reference to themselves:
  731. def outer(*args):
  732. def stringify_leaky(arg0, *arg1):
  733. if len(arg1) > 0:
  734. return stringify_leaky(*arg1) # <- HERE
  735. return str(arg0)
  736. stringify_leaky(*args)
  737. This design pattern creates a reference cycle that is difficult for a
  738. garbage collector to resolve. The decorator class prevents the
  739. cycle by passing the nested function in as an argument `self`:
  740. def outer(*args):
  741. @recursive
  742. def stringify(self, arg0, *arg1):
  743. if len(arg1) > 0:
  744. return self(*arg1)
  745. return str(arg0)
  746. stringify(*args)
  747. '''
  748. def __init__(self, func):
  749. self.func = func
  750. def __call__(self, *args, **kwargs):
  751. return self.func(self, *args, **kwargs)