
  1. """Lite version of scipy.linalg.
  2. Notes
  3. -----
  4. This module is a lite version of the linalg.py module in SciPy which
  5. contains high-level Python interface to the LAPACK library. The lite
  6. version only accesses the following LAPACK functions: dgesv, zgesv,
  7. dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
  8. zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
  9. """
  10. __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
  11. 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
  12. 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
  13. 'LinAlgError', 'multi_dot']
  14. import functools
  15. import operator
  16. import warnings
  17. from numpy.core import (
  18. array, asarray, zeros, empty, empty_like, intc, single, double,
  19. csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
  20. add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
  21. finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
  22. atleast_2d, intp, asanyarray, object_, matmul,
  23. swapaxes, divide, count_nonzero, isnan, sign, argsort, sort
  24. )
  25. from numpy.core.multiarray import normalize_axis_index
  26. from numpy.core.overrides import set_module
  27. from numpy.core import overrides
  28. from numpy.lib.twodim_base import triu, eye
  29. from numpy.linalg import lapack_lite, _umath_linalg
  30. array_function_dispatch = functools.partial(
  31. overrides.array_function_dispatch, module='numpy.linalg')
  32. fortran_int = intc
  33. @set_module('numpy.linalg')
  34. class LinAlgError(Exception):
  35. """
  36. Generic Python-exception-derived object raised by linalg functions.
  37. General purpose exception class, derived from Python's exception.Exception
  38. class, programmatically raised in linalg functions when a Linear
  39. Algebra-related condition would prevent further correct execution of the
  40. function.
  41. Parameters
  42. ----------
  43. None
  44. Examples
  45. --------
  46. >>> from numpy import linalg as LA
  47. >>> LA.inv(np.zeros((2,2)))
  48. Traceback (most recent call last):
  49. File "<stdin>", line 1, in <module>
  50. File "...linalg.py", line 350,
  51. in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
  52. File "...linalg.py", line 249,
  53. in solve
  54. raise LinAlgError('Singular matrix')
  55. numpy.linalg.LinAlgError: Singular matrix
  56. """
  57. def _determine_error_states():
  58. errobj = geterrobj()
  59. bufsize = errobj[0]
  60. with errstate(invalid='call', over='ignore',
  61. divide='ignore', under='ignore'):
  62. invalid_call_errmask = geterrobj()[1]
  63. return [bufsize, invalid_call_errmask, None]
  64. # Dealing with errors in _umath_linalg
  65. _linalg_error_extobj = _determine_error_states()
  66. del _determine_error_states
  67. def _raise_linalgerror_singular(err, flag):
  68. raise LinAlgError("Singular matrix")
  69. def _raise_linalgerror_nonposdef(err, flag):
  70. raise LinAlgError("Matrix is not positive definite")
  71. def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
  72. raise LinAlgError("Eigenvalues did not converge")
  73. def _raise_linalgerror_svd_nonconvergence(err, flag):
  74. raise LinAlgError("SVD did not converge")
  75. def _raise_linalgerror_lstsq(err, flag):
  76. raise LinAlgError("SVD did not converge in Linear Least Squares")
  77. def get_linalg_error_extobj(callback):
  78. extobj = list(_linalg_error_extobj) # make a copy
  79. extobj[2] = callback
  80. return extobj
  81. def _makearray(a):
  82. new = asarray(a)
  83. wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
  84. return new, wrap
  85. def isComplexType(t):
  86. return issubclass(t, complexfloating)
  87. _real_types_map = {single : single,
  88. double : double,
  89. csingle : single,
  90. cdouble : double}
  91. _complex_types_map = {single : csingle,
  92. double : cdouble,
  93. csingle : csingle,
  94. cdouble : cdouble}
  95. def _realType(t, default=double):
  96. return _real_types_map.get(t, default)
  97. def _complexType(t, default=cdouble):
  98. return _complex_types_map.get(t, default)
  99. def _linalgRealType(t):
  100. """Cast the type t to either double or cdouble."""
  101. return double
  102. def _commonType(*arrays):
  103. # in lite version, use higher precision (always double or cdouble)
  104. result_type = single
  105. is_complex = False
  106. for a in arrays:
  107. if issubclass(a.dtype.type, inexact):
  108. if isComplexType(a.dtype.type):
  109. is_complex = True
  110. rt = _realType(a.dtype.type, default=None)
  111. if rt is None:
  112. # unsupported inexact scalar
  113. raise TypeError("array type %s is unsupported in linalg" %
  114. (a.dtype.name,))
  115. else:
  116. rt = double
  117. if rt is double:
  118. result_type = double
  119. if is_complex:
  120. t = cdouble
  121. result_type = _complex_types_map[result_type]
  122. else:
  123. t = double
  124. return t, result_type
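# A minimal illustrative sketch of the promotion performed by _commonType
# (the `_demo_commonType` helper is a hypothetical name used only for
# demonstration, not part of numpy.linalg's API): computation always happens
# in double/cdouble, while the result keeps the narrower input precision.
def _demo_commonType():
    import numpy as np  # deferred import, for illustration only
    a = np.ones((2, 2), dtype=np.float32)
    b = np.ones((2, 2), dtype=np.complex64)
    t, result_t = _commonType(a, b)
    # complex input forces a cdouble computation type; the result type is
    # the single-precision complex type matching the inputs.
    assert t is cdouble and result_t is csingle
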
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose


def _to_native_byte_order(*arrays):
    ret = []
    for arr in arrays:
        if arr.dtype.byteorder not in ('=', '|'):
            ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
        else:
            ret.append(arr)
    if len(ret) == 1:
        return ret[0]
    else:
        return ret


def _fastCopyAndTranspose(type, *arrays):
    cast_arrays = ()
    for a in arrays:
        if a.dtype.type is type:
            cast_arrays = cast_arrays + (_fastCT(a),)
        else:
            cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
    if len(cast_arrays) == 1:
        return cast_arrays[0]
    else:
        return cast_arrays


def _assert_2d(*arrays):
    for a in arrays:
        if a.ndim != 2:
            raise LinAlgError('%d-dimensional array given. Array must be '
                              'two-dimensional' % a.ndim)


def _assert_stacked_2d(*arrays):
    for a in arrays:
        if a.ndim < 2:
            raise LinAlgError('%d-dimensional array given. Array must be '
                              'at least two-dimensional' % a.ndim)


def _assert_stacked_square(*arrays):
    for a in arrays:
        m, n = a.shape[-2:]
        if m != n:
            raise LinAlgError('Last 2 dimensions of the array must be square')


def _assert_finite(*arrays):
    for a in arrays:
        if not isfinite(a).all():
            raise LinAlgError("Array must not contain infs or NaNs")


def _is_empty_2d(arr):
    # check size first for efficiency
    return arr.size == 0 and product(arr.shape[-2:]) == 0


def transpose(a):
    """
    Transpose each matrix in a stack of matrices.

    Unlike np.transpose, this only swaps the last two axes, rather than all of
    them.

    Parameters
    ----------
    a : (...,M,N) array_like

    Returns
    -------
    aT : (...,N,M) ndarray
    """
    return swapaxes(a, -1, -2)


# Linear equations

def _tensorsolve_dispatcher(a, b, axes=None):
    return (a, b)


@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
    """
    Solve the tensor equation ``a x = b`` for x.

    It is assumed that all indices of `x` are summed over in the product,
    together with the rightmost indices of `a`, as is done in, for example,
    ``tensordot(a, x, axes=b.ndim)``.

    Parameters
    ----------
    a : array_like
        Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
        the shape of that sub-tensor of `a` consisting of the appropriate
        number of its rightmost indices, and must be such that
        ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
        'square').
    b : array_like
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right, before inversion.
        If None (default), no reordering is done.

    Returns
    -------
    x : ndarray, shape Q

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorinv, numpy.einsum

    Examples
    --------
    >>> a = np.eye(2*3*4)
    >>> a.shape = (2*3, 4, 2, 3, 4)
    >>> b = np.random.randn(2*3, 4)
    >>> x = np.linalg.tensorsolve(a, b)
    >>> x.shape
    (2, 3, 4)
    >>> np.allclose(np.tensordot(a, x, axes=3), b)
    True

    """
    a, wrap = _makearray(a)
    b = asarray(b)
    an = a.ndim

    if axes is not None:
        allaxes = list(range(0, an))
        for k in axes:
            allaxes.remove(k)
            allaxes.insert(an, k)
        a = a.transpose(allaxes)

    oldshape = a.shape[-(an-b.ndim):]
    prod = 1
    for k in oldshape:
        prod *= k

    a = a.reshape(-1, prod)
    b = b.ravel()
    res = wrap(solve(a, b))
    res.shape = oldshape
    return res

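# A minimal illustrative sketch of the reshaping done by tensorsolve above
# (the `_demo_tensorsolve` helper is a hypothetical name used only for
# demonstration): tensorsolve is equivalent to flattening `a` into a square
# 2-D matrix, solving, and reshaping the result back to Q.
def _demo_tensorsolve():
    import numpy as np  # deferred import, for illustration only
    a = np.random.randn(6, 4, 2, 3, 4)      # shape b.shape + Q, Q = (2, 3, 4)
    b = np.random.randn(6, 4)
    x = tensorsolve(a, b)
    x_flat = solve(a.reshape(24, 24), b.ravel())
    assert np.allclose(x, x_flat.reshape(2, 3, 4))
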
def _solve_dispatcher(a, b):
    return (a, b)


@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
    """
    Solve a linear matrix equation, or system of linear scalar equations.

    Computes the "exact" solution, `x`, of the well-determined, i.e., full
    rank, linear matrix equation `ax = b`.

    Parameters
    ----------
    a : (..., M, M) array_like
        Coefficient matrix.
    b : {(..., M,), (..., M, K)}, array_like
        Ordinate or "dependent variable" values.

    Returns
    -------
    x : {(..., M,), (..., M, K)} ndarray
        Solution to the system a x = b.  Returned shape is identical to `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular or not square.

    See Also
    --------
    scipy.linalg.solve : Similar function in SciPy.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The solutions are computed using LAPACK routine ``_gesv``.

    `a` must be square and of full-rank, i.e., all rows (or, equivalently,
    columns) must be linearly independent; if either is not true, use
    `lstsq` for the least-squares best "solution" of the
    system/equation.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 22.

    Examples
    --------
    Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:

    >>> a = np.array([[3,1], [1,2]])
    >>> b = np.array([9,8])
    >>> x = np.linalg.solve(a, b)
    >>> x
    array([2., 3.])

    Check that the solution is correct:

    >>> np.allclose(np.dot(a, x), b)
    True

    """
    a, _ = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    b, wrap = _makearray(b)
    t, result_t = _commonType(a, b)

    # We use the b = (..., M,) logic, only if the number of extra dimensions
    # match exactly
    if b.ndim == a.ndim - 1:
        gufunc = _umath_linalg.solve1
    else:
        gufunc = _umath_linalg.solve

    signature = 'DD->D' if isComplexType(t) else 'dd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    r = gufunc(a, b, signature=signature, extobj=extobj)

    return wrap(r.astype(result_t, copy=False))

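# A minimal illustrative sketch of the broadcasting behaviour documented in
# the Notes above (the `_demo_solve_stacked` helper is a hypothetical name
# used only for demonstration): a stack of coefficient matrices is solved
# against a matching stack of right-hand sides in one call.
def _demo_solve_stacked():
    import numpy as np  # deferred import, for illustration only
    a = np.random.randn(5, 3, 3)    # five independent 3x3 systems
    b = np.random.randn(5, 3)       # (..., M) right-hand sides
    x = solve(a, b)                 # shape (5, 3)
    assert np.allclose(np.einsum('nij,nj->ni', a, x), b)
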
def _tensorinv_dispatcher(a, ind=None):
    return (a,)


@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
    """
    Compute the 'inverse' of an N-dimensional array.

    The result is an inverse for `a` relative to the tensordot operation
    ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
    ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
    tensordot operation.

    Parameters
    ----------
    a : array_like
        Tensor to 'invert'. Its shape must be 'square', i. e.,
        ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
    ind : int, optional
        Number of first indices that are involved in the inverse sum.
        Must be a positive integer, default is 2.

    Returns
    -------
    b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorsolve

    Examples
    --------
    >>> a = np.eye(4*6)
    >>> a.shape = (4, 6, 8, 3)
    >>> ainv = np.linalg.tensorinv(a, ind=2)
    >>> ainv.shape
    (8, 3, 4, 6)
    >>> b = np.random.randn(4, 6)
    >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
    True

    >>> a = np.eye(4*6)
    >>> a.shape = (24, 8, 3)
    >>> ainv = np.linalg.tensorinv(a, ind=1)
    >>> ainv.shape
    (8, 3, 24)
    >>> b = np.random.randn(24)
    >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
    True

    """
    a = asarray(a)
    oldshape = a.shape
    prod = 1
    if ind > 0:
        invshape = oldshape[ind:] + oldshape[:ind]
        for k in oldshape[ind:]:
            prod *= k
    else:
        raise ValueError("Invalid ind argument.")
    a = a.reshape(prod, -1)
    ia = inv(a)
    return ia.reshape(*invshape)

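# A minimal illustrative sketch of the 'identity' relation documented above
# (the `_demo_tensorinv` helper is a hypothetical name used only for
# demonstration): contracting tensorinv(a) with `a` over `ind` axes recovers
# a reshaped identity matrix.
def _demo_tensorinv():
    import numpy as np  # deferred import, for illustration only
    a = np.random.randn(4, 6, 8, 3)
    ainv = tensorinv(a, ind=2)              # shape (8, 3, 4, 6)
    ident = np.tensordot(ainv, a, 2)        # shape (8, 3, 8, 3)
    assert np.allclose(ident.reshape(24, 24), np.eye(24))
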
# Matrix inversion

def _unary_dispatcher(a):
    return (a,)


@array_function_dispatch(_unary_dispatcher)
def inv(a):
    """
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return the matrix `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix to be inverted.

    Returns
    -------
    ainv : (..., M, M) ndarray or matrix
        (Multiplicative) inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is not square or inversion fails.

    See Also
    --------
    scipy.linalg.inv : Similar function in SciPy.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    Examples
    --------
    >>> from numpy.linalg import inv
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> ainv = inv(a)
    >>> np.allclose(np.dot(a, ainv), np.eye(2))
    True
    >>> np.allclose(np.dot(ainv, a), np.eye(2))
    True

    If a is a matrix object, then the return value is a matrix as well:

    >>> ainv = inv(np.matrix(a))
    >>> ainv
    matrix([[-2. ,  1. ],
            [ 1.5, -0.5]])

    Inverses of several matrices can be computed at once:

    >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
    >>> inv(a)
    array([[[-2.  ,  1.  ],
            [ 1.5 , -0.5 ]],
           [[-1.25,  0.75],
            [ 0.75, -0.25]]])

    """
    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
    return wrap(ainv.astype(result_t, copy=False))

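# A minimal illustrative sketch verifying the stacked behaviour documented
# above (the `_demo_inv_stacked` helper is a hypothetical name used only for
# demonstration): each slice of the result inverts the matching input slice.
def _demo_inv_stacked():
    import numpy as np  # deferred import, for illustration only
    a = np.random.randn(4, 3, 3)
    ainv = inv(a)
    eye3 = np.broadcast_to(np.eye(3), (4, 3, 3))
    assert np.allclose(np.matmul(a, ainv), eye3)
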
def _matrix_power_dispatcher(a, n):
    return (a,)


@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
    """
    Raise a square matrix to the (integer) power `n`.

    For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as `a` is returned. If ``n < 0``, the inverse
    is computed and then raised to the ``abs(n)``.

    .. note:: Stacks of object matrices are not currently supported.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix to be "powered".
    n : int
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    a**n : (..., M, M) ndarray or matrix object
        The return value is the same shape and type as `a`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `a`. If the exponent is
        negative the elements are floating-point.

    Raises
    ------
    LinAlgError
        For matrices that are not square or that (for negative powers) cannot
        be inverted numerically.

    Examples
    --------
    >>> from numpy.linalg import matrix_power
    >>> i = np.array([[0, 1], [-1, 0]])  # matrix equiv. of the imaginary unit
    >>> matrix_power(i, 3)  # should = -i
    array([[ 0, -1],
           [ 1,  0]])
    >>> matrix_power(i, 0)
    array([[1, 0],
           [0, 1]])
    >>> matrix_power(i, -3)  # should = 1/(-i) = i, but w/ f.p. elements
    array([[ 0.,  1.],
           [-1.,  0.]])

    Somewhat more sophisticated example

    >>> q = np.zeros((4, 4))
    >>> q[0:2, 0:2] = -i
    >>> q[2:4, 2:4] = i
    >>> q  # one of the three quaternion units not equal to 1
    array([[ 0., -1.,  0.,  0.],
           [ 1.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  1.],
           [ 0.,  0., -1.,  0.]])
    >>> matrix_power(q, 2)  # = -np.eye(4)
    array([[-1.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  0.],
           [ 0.,  0., -1.,  0.],
           [ 0.,  0.,  0., -1.]])

    """
    a = asanyarray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)

    try:
        n = operator.index(n)
    except TypeError as e:
        raise TypeError("exponent must be an integer") from e

    # Fall back on dot for object arrays. Object arrays are not supported by
    # the current implementation of matmul using einsum
    if a.dtype != object:
        fmatmul = matmul
    elif a.ndim == 2:
        fmatmul = dot
    else:
        raise NotImplementedError(
            "matrix_power not supported for stacks of object arrays")

    if n == 0:
        a = empty_like(a)
        a[...] = eye(a.shape[-2], dtype=a.dtype)
        return a
    elif n < 0:
        a = inv(a)
        n = abs(n)

    # short-cuts.
    if n == 1:
        return a
    elif n == 2:
        return fmatmul(a, a)
    elif n == 3:
        return fmatmul(fmatmul(a, a), a)

    # Use binary decomposition to reduce the number of matrix multiplications.
    # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
    # increasing powers of 2, and multiply into the result as needed.
    z = result = None
    while n > 0:
        z = a if z is None else fmatmul(z, z)
        n, bit = divmod(n, 2)
        if bit:
            result = z if result is None else fmatmul(result, z)

    return result

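# A minimal standalone sketch of the binary-decomposition loop used above
# (the `_demo_matrix_power_binary` helper is a hypothetical name used only
# for demonstration): the exponent's bits select which squared powers are
# multiplied into the result, so only O(log n) matrix products are needed.
def _demo_matrix_power_binary():
    import numpy as np  # deferred import, for illustration only
    a = np.random.randn(3, 3)
    n = 13                                  # binary 1101
    z = result = None
    while n > 0:
        z = a if z is None else z @ z       # a**1, a**2, a**4, a**8, ...
        n, bit = divmod(n, 2)
        if bit:
            result = z if result is None else result @ z
    assert np.allclose(result, matrix_power(a, 13))
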
# Cholesky decomposition

@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
    """
    Cholesky decomposition.

    Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
    where `L` is lower-triangular and .H is the conjugate transpose operator
    (which is the ordinary transpose if `a` is real-valued).  `a` must be
    Hermitian (symmetric if real-valued) and positive-definite. No
    checking is performed to verify whether `a` is Hermitian or not.
    In addition, only the lower-triangular and diagonal elements of `a`
    are used. Only `L` is actually returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        Hermitian (symmetric if all elements are real), positive-definite
        input matrix.

    Returns
    -------
    L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`.  Returns a
        matrix object if `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the decomposition fails, for example, if `a` is not
        positive-definite.

    See Also
    --------
    scipy.linalg.cholesky : Similar function in SciPy.
    scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
                                   positive-definite matrix.
    scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
                              `scipy.linalg.cho_solve`.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The Cholesky decomposition is often used as a fast way of solving

    .. math:: A \\mathbf{x} = \\mathbf{b}

    (when `A` is both Hermitian/symmetric and positive-definite).

    First, we solve for :math:`\\mathbf{y}` in

    .. math:: L \\mathbf{y} = \\mathbf{b},

    and then for :math:`\\mathbf{x}` in

    .. math:: L.H \\mathbf{x} = \\mathbf{y}.

    Examples
    --------
    >>> A = np.array([[1,-2j],[2j,5]])
    >>> A
    array([[ 1.+0.j, -0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    >>> L = np.linalg.cholesky(A)
    >>> L
    array([[1.+0.j, 0.+0.j],
           [0.+2.j, 1.+0.j]])
    >>> np.dot(L, L.T.conj())  # verify that L * L.H = A
    array([[1.+0.j, 0.-2.j],
           [0.+2.j, 5.+0.j]])
    >>> A = [[1,-2j],[2j,5]]  # what happens if A is only array_like?
    >>> np.linalg.cholesky(A)  # an ndarray object is returned
    array([[1.+0.j, 0.+0.j],
           [0.+2.j, 1.+0.j]])
    >>> # But a matrix object is returned if A is a matrix object
    >>> np.linalg.cholesky(np.matrix(A))
    matrix([[ 1.+0.j,  0.+0.j],
            [ 0.+2.j,  1.+0.j]])

    """
    extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
    gufunc = _umath_linalg.cholesky_lo
    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    r = gufunc(a, signature=signature, extobj=extobj)
    return wrap(r.astype(result_t, copy=False))

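# A minimal illustrative sketch of the two-step solve described in the Notes
# above (the `_demo_cholesky_solve` helper is a hypothetical name used only
# for demonstration).  The triangular systems are solved here with the
# generic `solve` for brevity; a dedicated triangular solver such as
# scipy.linalg.solve_triangular would be the faster choice.
def _demo_cholesky_solve():
    import numpy as np  # deferred import, for illustration only
    rng = np.random.default_rng(0)
    m = rng.standard_normal((4, 4))
    A = m @ m.T + 4 * np.eye(4)              # symmetric positive-definite
    b = rng.standard_normal(4)
    L = cholesky(A)
    y = solve(L, b)                           # L y = b
    x = solve(L.T.conj(), y)                  # L.H x = y
    assert np.allclose(A @ x, b)
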
# QR decomposition

def _qr_dispatcher(a, mode=None):
    return (a,)


@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.

    Parameters
    ----------
    a : array_like, shape (M, N)
        Matrix to be factored.
    mode : {'reduced', 'complete', 'r', 'raw'}, optional
        If K = min(M, N), then

        * 'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
        * 'complete' : returns q, r with dimensions (M, M), (M, N)
        * 'r'        : returns r only with dimensions (K, N)
        * 'raw'      : returns h, tau with dimensions (N, M), (K,)

        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
        see the notes for more information. The default is 'reduced', and to
        maintain backward compatibility with earlier versions of numpy both
        it and the old default 'full' can be omitted. Note that array h
        returned in 'raw' mode is transposed for calling Fortran. The
        'economic' mode is deprecated.  The modes 'full' and 'economic' may
        be passed using only the first letter for backwards compatibility,
        but all others must be spelled out. See the Notes for more
        explanation.

    Returns
    -------
    q : ndarray of float or complex, optional
        A matrix with orthonormal columns. When mode = 'complete' the
        result is an orthogonal/unitary matrix depending on whether or not
        a is real/complex. The determinant may be either +/- 1 in that
        case.
    r : ndarray of float or complex, optional
        The upper-triangular matrix.
    (h, tau) : ndarrays of np.double or np.cdouble, optional
        The array h contains the Householder reflectors that generate q
        along with r. The tau array contains scaling factors for the
        reflectors. In the deprecated 'economic' mode only h is returned.

    Raises
    ------
    LinAlgError
        If factoring fails.

    See Also
    --------
    scipy.linalg.qr : Similar function in SciPy.
    scipy.linalg.rq : Compute RQ decomposition of a matrix.

    Notes
    -----
    This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
    ``dorgqr``, and ``zungqr``.

    For more information on the qr factorization, see for example:
    https://en.wikipedia.org/wiki/QR_factorization

    Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
    `a` is of type `matrix`, all the return values will be matrices too.

    New 'reduced', 'complete', and 'raw' options for mode were added in
    NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'.  In
    addition the options 'full' and 'economic' were deprecated.  Because
    'full' was the previous default and 'reduced' is the new default,
    backward compatibility can be maintained by letting `mode` default.
    The 'raw' option was added so that LAPACK routines that can multiply
    arrays by q using the Householder reflectors can be used. Note that in
    this case the returned arrays are of type np.double or np.cdouble and
    the h array is transposed to be FORTRAN compatible.  No routines using
    the 'raw' return are currently exposed by numpy, but some are available
    in lapack_lite and just await the necessary work.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r))  # a does equal qr
    True
    >>> r2 = np.linalg.qr(a, mode='r')
    >>> np.allclose(r, r2)  # mode='r' returns the same r as mode='full'
    True

    Example illustrating a common use of `qr`: solving of least squares
    problems

    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
    the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
    and you'll see that it should be y0 = 0, m = 1.)  The answer is provided
    by solving the over-determined matrix equation ``Ax = b``, where::

      A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
      x = array([[y0], [m]])
      b = array([[1], [0], [2], [1]])

    If A = qr such that q is orthonormal (which is always possible via
    Gram-Schmidt), then ``x = inv(r) * (q.T) * b``.  (In numpy practice,
    however, we simply use `lstsq`.)

    >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
    >>> A
    array([[0, 1],
           [1, 1],
           [1, 1],
           [2, 1]])
    >>> b = np.array([1, 0, 2, 1])
    >>> q, r = np.linalg.qr(A)
    >>> p = np.dot(q.T, b)
    >>> np.dot(np.linalg.inv(r), p)
    array([  1.1e-16,   1.0e+00])

    """
    if mode not in ('reduced', 'complete', 'r', 'raw'):
        if mode in ('f', 'full'):
            # 2013-04-01, 1.8
            msg = "".join((
                    "The 'full' option is deprecated in favor of 'reduced'.\n",
                    "For backward compatibility let mode default."))
            warnings.warn(msg, DeprecationWarning, stacklevel=3)
            mode = 'reduced'
        elif mode in ('e', 'economic'):
            # 2013-04-01, 1.8
            msg = "The 'economic' option is deprecated."
            warnings.warn(msg, DeprecationWarning, stacklevel=3)
            mode = 'economic'
        else:
            raise ValueError("Unrecognized mode '%s'" % mode)

    a, wrap = _makearray(a)
    _assert_2d(a)
    m, n = a.shape
    t, result_t = _commonType(a)
    a = _fastCopyAndTranspose(t, a)
    a = _to_native_byte_order(a)
    mn = min(m, n)
    tau = zeros((mn,), t)

    if isComplexType(t):
        lapack_routine = lapack_lite.zgeqrf
        routine_name = 'zgeqrf'
    else:
        lapack_routine = lapack_lite.dgeqrf
        routine_name = 'dgeqrf'

    # calculate optimal size of work data 'work'
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    # do qr decomposition
    lwork = max(1, n, int(abs(work[0])))
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    # handle modes that don't return q
    if mode == 'r':
        r = _fastCopyAndTranspose(result_t, a[:, :mn])
        return wrap(triu(r))

    if mode == 'raw':
        return a, tau

    if mode == 'economic':
        if t != result_t:
            a = a.astype(result_t, copy=False)
        return wrap(a.T)

    # generate q from a
    if mode == 'complete' and m > n:
        mc = m
        q = empty((m, m), t)
    else:
        mc = mn
        q = empty((n, m), t)
    q[:n] = a

    if isComplexType(t):
        lapack_routine = lapack_lite.zungqr
        routine_name = 'zungqr'
    else:
        lapack_routine = lapack_lite.dorgqr
        routine_name = 'dorgqr'

    # determine optimal lwork
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    # compute q
    lwork = max(1, n, int(abs(work[0])))
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    q = _fastCopyAndTranspose(result_t, q[:mc])
    r = _fastCopyAndTranspose(result_t, a[:, :mc])

    return wrap(q), wrap(triu(r))

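# A minimal illustrative sketch of the least-squares use shown in the qr
# docstring above (the `_demo_qr_lstsq` helper is a hypothetical name used
# only for demonstration): for an overdetermined system Ax = b,
# x = inv(r) @ q.T @ b reproduces the least-squares solution.
def _demo_qr_lstsq():
    import numpy as np  # deferred import, for illustration only
    A = np.array([[0., 1.], [1., 1.], [1., 1.], [2., 1.]])
    b = np.array([1., 0., 2., 1.])
    q, r = qr(A)                              # reduced QR: q is 4x2, r is 2x2
    x = inv(r) @ (q.T @ b)
    assert np.allclose(x, np.linalg.lstsq(A, b, rcond=None)[0])
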
# Eigenvalues

@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be computed.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily
        real for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of real symmetric or complex Hermitian
               (conjugate symmetric) arrays.
    eigh : eigenvalues and eigenvectors of real symmetric or complex
           Hermitian (conjugate symmetric) arrays.
    scipy.linalg.eigvals : Similar function in SciPy.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    This is implemented using the ``_geev`` LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.

    Examples
    --------
    Illustration, using the fact that the eigenvalues of a diagonal matrix
    are its diagonal elements, that multiplying a matrix on the left
    by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
    of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
    if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
    ``A``:

    >>> from numpy import linalg as LA
    >>> x = np.random.random()
    >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
    >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
    (1.0, 1.0, 0.0)

    Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:

    >>> D = np.diag((-1,1))
    >>> LA.eigvals(D)
    array([-1.,  1.])
    >>> A = np.dot(Q, D)
    >>> A = np.dot(A, Q.T)
    >>> LA.eigvals(A)
    array([ 1., -1.]) # random

    """
    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    _assert_finite(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->D' if isComplexType(t) else 'd->D'
    w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)

    if not isComplexType(t):
        if all(w.imag == 0):
            w = w.real
            result_t = _realType(result_t)
        else:
            result_t = _complexType(result_t)

    return w.astype(result_t, copy=False)

def _eigvalsh_dispatcher(a, UPLO=None):
    return (a,)


@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a complex Hermitian or real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
           (conjugate symmetric) arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general real or complex
          arrays.
    scipy.linalg.eigvalsh : Similar function in SciPy.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> LA.eigvalsh(a)
    array([ 0.17157288,  5.82842712]) # may vary

    >>> # demonstrate the treatment of the imaginary part of the diagonal
    >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
    >>> a
    array([[5.+2.j, 9.-2.j],
           [0.+2.j, 2.-1.j]])
    >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
    >>> # with:
    >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
    >>> b
    array([[5.+0.j, 0.-2.j],
           [0.+2.j, 2.+0.j]])
    >>> wa = LA.eigvalsh(a)
    >>> wb = LA.eigvals(b)
    >>> wa; wb
    array([1., 6.])
    array([6.+0.j, 1.+0.j])

    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    if UPLO == 'L':
        gufunc = _umath_linalg.eigvalsh_lo
    else:
        gufunc = _umath_linalg.eigvalsh_up

    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    signature = 'D->d' if isComplexType(t) else 'd->d'
    w = gufunc(a, signature=signature, extobj=extobj)
    return w.astype(_realType(result_t), copy=False)

def _convertarray(a):
    t, result_t = _commonType(a)
    a = _fastCT(a.astype(t))
    return a, t, result_t


# Eigenvectors

@array_function_dispatch(_unary_dispatcher)
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed

    Returns
    -------
    w : (..., M) array
        The eigenvalues, each repeated according to its multiplicity.
        The eigenvalues are not necessarily ordered. The resulting
        array will be of complex type, unless the imaginary part is
        zero in which case it will be cast to a real type. When `a`
        is real the resulting eigenvalues will be real (0 imaginary
        part) or occur in conjugate pairs
    v : (..., M, M) array
        The normalized (unit "length") eigenvectors, such that the
        column ``v[:,i]`` is the eigenvector corresponding to the
        eigenvalue ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a real symmetric or complex
           Hermitian (conjugate symmetric) array.
    eigvalsh : eigenvalues of a real symmetric or complex Hermitian
               (conjugate symmetric) array.
    scipy.linalg.eig : Similar function in SciPy that also solves the
                       generalized eigenvalue problem.
    scipy.linalg.schur : Best choice for unitary and other non-Hermitian
                         normal matrices.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    This is implemented using the ``_geev`` LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.

    The number `w` is an eigenvalue of `a` if there exists a vector
    `v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
    for :math:`i \\in \\{0,...,M-1\\}`.

    The array `v` of eigenvectors may not be of maximum rank, that is, some
    of the columns may be linearly dependent, although round-off error may
    obscure that fact. If the eigenvalues are all different, then theoretically
    the eigenvectors are linearly independent and `a` can be diagonalized by
    a similarity transformation using `v`, i.e., ``inv(v) @ a @ v`` is diagonal.

    For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
    is preferred because the matrix `v` is guaranteed to be unitary, which is
    not the case when using `eig`. The Schur factorization produces an
    upper triangular matrix rather than a diagonal matrix, but for normal
    matrices only the diagonal of the upper triangular matrix is needed, the
    rest is roundoff error.

    Finally, it is emphasized that `v` consists of the *right* (as in
    right-hand side) eigenvectors of `a`.  A vector `y` satisfying
    ``y.T @ a = z * y.T`` for some number `z` is called a *left*
    eigenvector of `a`, and, in general, the left and right eigenvectors
    of a matrix are not necessarily the (perhaps conjugate) transposes
    of each other.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
    Academic Press, Inc., 1980, Various pp.

    Examples
    --------
    >>> from numpy import linalg as LA

    (Almost) trivial example with real e-values and e-vectors.

    >>> w, v = LA.eig(np.diag((1, 2, 3)))
    >>> w; v
    array([1., 2., 3.])
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])

    Real matrix possessing complex e-values and e-vectors; note that the
    e-values are complex conjugates of each other.

    >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
    >>> w; v
    array([1.+1.j, 1.-1.j])
    array([[0.70710678+0.j        , 0.70710678-0.j        ],
           [0.        -0.70710678j, 0.        +0.70710678j]])

    Complex-valued matrix with real e-values (but complex-valued e-vectors);
    note that ``a.conj().T == a``, i.e., `a` is Hermitian.

    >>> a = np.array([[1, 1j], [-1j, 1]])
    >>> w, v = LA.eig(a)
    >>> w; v
    array([2.+0.j, 0.+0.j])
    array([[ 0.        +0.70710678j,  0.70710678+0.j        ], # may vary
           [ 0.70710678+0.j        , -0.        +0.70710678j]])

    Be careful about round-off error!

    >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
    >>> # Theor. e-values are 1 +/- 1e-9
    >>> w, v = LA.eig(a)
    >>> w; v
    array([1., 1.])
    array([[1., 0.],
           [0., 1.]])

    """
    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    _assert_finite(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->DD' if isComplexType(t) else 'd->DD'
    w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)

    if not isComplexType(t) and all(w.imag == 0.0):
        w = w.real
        vt = vt.real
        result_t = _realType(result_t)
    else:
        result_t = _complexType(result_t)

    vt = vt.astype(result_t, copy=False)
    return w.astype(result_t, copy=False), wrap(vt)

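# A minimal illustrative sketch of the defining relation and diagonalization
# noted above (the `_demo_eig` helper is a hypothetical name used only for
# demonstration): a @ v[:, i] == w[i] * v[:, i], and inv(v) @ a @ v is
# (numerically) diagonal when the eigenvalues are distinct.
def _demo_eig():
    import numpy as np  # deferred import, for illustration only
    a = np.array([[0., 2.], [1., 1.]])        # eigenvalues 2 and -1
    w, v = eig(a)
    assert np.allclose(a @ v, v * w)          # column-wise a v_i = w_i v_i
    d = inv(v) @ a @ v
    assert np.allclose(d, np.diag(w))
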
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a complex Hermitian
    (conjugate symmetric) or a real symmetric matrix.

    Returns two objects, a 1-D array containing the eigenvalues of `a`, and
    a 2-D square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (..., M, M) array
        Hermitian or real symmetric matrices whose eigenvalues and
        eigenvectors are to be computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a Hermitian
        matrix. It therefore follows that the imaginary part of the diagonal
        will always be treated as zero.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        The column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``.  Will return a matrix object if `a` is
        a matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of real symmetric or complex Hermitian
               (conjugate symmetric) arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.
    scipy.linalg.eigh : Similar function in SciPy (but also solves the
                        generalized eigenvalue problem).

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
    ``_heevd``.

    The eigenvalues of real symmetric or complex Hermitian matrices are
    always real. [1]_ The array `v` of (column) eigenvectors is unitary
    and `a`, `w`, and `v` satisfy the equations
    ``dot(a, v[:, i]) = w[i] * v[:, i]``.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 222.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> a
    array([[ 1.+0.j, -0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(a)
    >>> w; v
    array([0.17157288, 5.82842712])
    array([[-0.92387953+0.j        , -0.38268343+0.j        ], # may vary
           [ 0.        +0.38268343j,  0.        -0.92387953j]])

    >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0]  # verify 1st e-val/vec pair
    array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
    >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1]  # verify 2nd e-val/vec pair
    array([0.+0.j, 0.+0.j])

    >>> A = np.matrix(a)  # what happens if input is a matrix object
    >>> A
    matrix([[ 1.+0.j, -0.-2.j],
            [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(A)
    >>> w; v
    array([0.17157288, 5.82842712])
    matrix([[-0.92387953+0.j        , -0.38268343+0.j        ], # may vary
            [ 0.        +0.38268343j,  0.        -0.92387953j]])

    >>> # demonstrate the treatment of the imaginary part of the diagonal
    >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
    >>> a
    array([[5.+2.j, 9.-2.j],
           [0.+2.j, 2.-1.j]])
    >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
    >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
    >>> b
    array([[5.+0.j, 0.-2.j],
           [0.+2.j, 2.+0.j]])
    >>> wa, va = LA.eigh(a)
    >>> wb, vb = LA.eig(b)
    >>> wa; wb
    array([1., 6.])
    array([6.+0.j, 1.+0.j])
    >>> va; vb
    array([[-0.4472136 +0.j        , -0.89442719+0.j        ], # may vary
           [ 0.        +0.89442719j,  0.        -0.4472136j ]])
    array([[ 0.89442719+0.j       , -0.        +0.4472136j],
           [-0.        +0.4472136j,  0.89442719+0.j       ]])

    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")

    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    if UPLO == 'L':
        gufunc = _umath_linalg.eigh_lo
    else:
        gufunc = _umath_linalg.eigh_up

    signature = 'D->dD' if isComplexType(t) else 'd->dd'
    w, vt = gufunc(a, signature=signature, extobj=extobj)
    w = w.astype(_realType(result_t), copy=False)
    vt = vt.astype(result_t, copy=False)
    return w, wrap(vt)

  1187. # Singular value decomposition
  1188. def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
  1189. return (a,)
  1190. @array_function_dispatch(_svd_dispatcher)
  1191. def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
  1192. """
  1193. Singular Value Decomposition.
  1194. When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
  1195. = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
  1196. array of `a`'s singular values. When `a` is higher-dimensional, SVD is
  1197. applied in stacked mode as explained below.
  1198. Parameters
  1199. ----------
  1200. a : (..., M, N) array_like
  1201. A real or complex array with ``a.ndim >= 2``.
  1202. full_matrices : bool, optional
  1203. If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
  1204. ``(..., N, N)``, respectively. Otherwise, the shapes are
  1205. ``(..., M, K)`` and ``(..., K, N)``, respectively, where
  1206. ``K = min(M, N)``.
  1207. compute_uv : bool, optional
  1208. Whether or not to compute `u` and `vh` in addition to `s`. True
  1209. by default.
  1210. hermitian : bool, optional
  1211. If True, `a` is assumed to be Hermitian (symmetric if real-valued),
  1212. enabling a more efficient method for finding singular values.
  1213. Defaults to False.
  1214. .. versionadded:: 1.17.0
  1215. Returns
  1216. -------
  1217. u : { (..., M, M), (..., M, K) } array
  1218. Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
  1219. size as those of the input `a`. The size of the last two dimensions
  1220. depends on the value of `full_matrices`. Only returned when
  1221. `compute_uv` is True.
  1222. s : (..., K) array
  1223. Vector(s) with the singular values, within each vector sorted in
  1224. descending order. The first ``a.ndim - 2`` dimensions have the same
  1225. size as those of the input `a`.
  1226. vh : { (..., N, N), (..., K, N) } array
  1227. Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
  1228. size as those of the input `a`. The size of the last two dimensions
  1229. depends on the value of `full_matrices`. Only returned when
  1230. `compute_uv` is True.
  1231. Raises
  1232. ------
  1233. LinAlgError
  1234. If SVD computation does not converge.
  1235. See Also
  1236. --------
  1237. scipy.linalg.svd : Similar function in SciPy.
  1238. scipy.linalg.svdvals : Compute singular values of a matrix.
  1239. Notes
  1240. -----
  1241. .. versionchanged:: 1.8.0
  1242. Broadcasting rules apply, see the `numpy.linalg` documentation for
  1243. details.
  1244. The decomposition is performed using LAPACK routine ``_gesdd``.
  1245. SVD is usually described for the factorization of a 2D matrix :math:`A`.
  1246. The higher-dimensional case will be discussed below. In the 2D case, SVD is
  1247. written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
  1248. :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
  1249. contains the singular values of `a` and `u` and `vh` are unitary. The rows
  1250. of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
  1251. the eigenvectors of :math:`A A^H`. In both cases the corresponding
  1252. (possibly non-zero) eigenvalues are given by ``s**2``.
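For a real 2-D array this relationship can be checked directly (a minimal
sketch; ``a`` here stands for any real matrix, e.g. a random one)::

    a = np.random.randn(5, 3)
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    # rows of vh are eigenvectors of a.T @ a, columns of u of a @ a.T,
    # in both cases with eigenvalues s**2
    np.allclose(a.T @ a @ vh[0], s[0]**2 * vh[0])      # True
    np.allclose(a @ a.T @ u[:, 0], s[0]**2 * u[:, 0])  # True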
  1253. If `a` has more than two dimensions, then broadcasting rules apply, as
  1254. explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
  1255. working in "stacked" mode: it iterates over all indices of the first
  1256. ``a.ndim - 2`` dimensions and for each combination SVD is applied to the
  1257. last two indices. The matrix `a` can be reconstructed from the
  1258. decomposition with either ``(u * s[..., None, :]) @ vh`` or
  1259. ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
  1260. function ``np.matmul`` for python versions below 3.5.)
  1261. If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
  1262. all the return values.
  1263. Examples
  1264. --------
  1265. >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
  1266. >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
  1267. Reconstruction based on full SVD, 2D case:
  1268. >>> u, s, vh = np.linalg.svd(a, full_matrices=True)
  1269. >>> u.shape, s.shape, vh.shape
  1270. ((9, 9), (6,), (6, 6))
  1271. >>> np.allclose(a, np.dot(u[:, :6] * s, vh))
  1272. True
  1273. >>> smat = np.zeros((9, 6), dtype=complex)
  1274. >>> smat[:6, :6] = np.diag(s)
  1275. >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
  1276. True
  1277. Reconstruction based on reduced SVD, 2D case:
  1278. >>> u, s, vh = np.linalg.svd(a, full_matrices=False)
  1279. >>> u.shape, s.shape, vh.shape
  1280. ((9, 6), (6,), (6, 6))
  1281. >>> np.allclose(a, np.dot(u * s, vh))
  1282. True
  1283. >>> smat = np.diag(s)
  1284. >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
  1285. True
  1286. Reconstruction based on full SVD, 4D case:
  1287. >>> u, s, vh = np.linalg.svd(b, full_matrices=True)
  1288. >>> u.shape, s.shape, vh.shape
  1289. ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
  1290. >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
  1291. True
  1292. >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
  1293. True
  1294. Reconstruction based on reduced SVD, 4D case:
  1295. >>> u, s, vh = np.linalg.svd(b, full_matrices=False)
  1296. >>> u.shape, s.shape, vh.shape
  1297. ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
  1298. >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
  1299. True
  1300. >>> np.allclose(b, np.matmul(u, s[..., None] * vh))
  1301. True
  1302. """
  1303. import numpy as _nx
  1304. a, wrap = _makearray(a)
  1305. if hermitian:
  1306. # note: lapack svd returns eigenvalues with s ** 2 sorted descending,
  1307. # but eig returns s sorted ascending, so we re-order the eigenvalues
  1308. # and related arrays to have the correct order
  1309. if compute_uv:
  1310. s, u = eigh(a)
  1311. sgn = sign(s)
  1312. s = abs(s)
  1313. sidx = argsort(s)[..., ::-1]
  1314. sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
  1315. s = _nx.take_along_axis(s, sidx, axis=-1)
  1316. u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
  1317. # singular values are unsigned, move the sign into v
  1318. vt = transpose(u * sgn[..., None, :]).conjugate()
  1319. return wrap(u), s, wrap(vt)
  1320. else:
  1321. s = eigvalsh(a)
  1322. s = s[..., ::-1]
  1323. s = abs(s)
  1324. return sort(s)[..., ::-1]
  1325. _assert_stacked_2d(a)
  1326. t, result_t = _commonType(a)
  1327. extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
  1328. m, n = a.shape[-2:]
  1329. if compute_uv:
  1330. if full_matrices:
  1331. if m < n:
  1332. gufunc = _umath_linalg.svd_m_f
  1333. else:
  1334. gufunc = _umath_linalg.svd_n_f
  1335. else:
  1336. if m < n:
  1337. gufunc = _umath_linalg.svd_m_s
  1338. else:
  1339. gufunc = _umath_linalg.svd_n_s
  1340. signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
  1341. u, s, vh = gufunc(a, signature=signature, extobj=extobj)
  1342. u = u.astype(result_t, copy=False)
  1343. s = s.astype(_realType(result_t), copy=False)
  1344. vh = vh.astype(result_t, copy=False)
  1345. return wrap(u), s, wrap(vh)
  1346. else:
  1347. if m < n:
  1348. gufunc = _umath_linalg.svd_m
  1349. else:
  1350. gufunc = _umath_linalg.svd_n
  1351. signature = 'D->d' if isComplexType(t) else 'd->d'
  1352. s = gufunc(a, signature=signature, extobj=extobj)
  1353. s = s.astype(_realType(result_t), copy=False)
  1354. return s
  1355. def _cond_dispatcher(x, p=None):
  1356. return (x,)
  1357. @array_function_dispatch(_cond_dispatcher)
  1358. def cond(x, p=None):
  1359. """
  1360. Compute the condition number of a matrix.
  1361. This function is capable of returning the condition number using
  1362. one of seven different norms, depending on the value of `p` (see
  1363. Parameters below).
  1364. Parameters
  1365. ----------
  1366. x : (..., M, N) array_like
  1367. The matrix whose condition number is sought.
  1368. p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
  1369. Order of the norm:
  1370. ===== ============================
  1371. p norm for matrices
  1372. ===== ============================
  1373. None 2-norm, computed directly using the ``SVD``
  1374. 'fro' Frobenius norm
  1375. inf max(sum(abs(x), axis=1))
  1376. -inf min(sum(abs(x), axis=1))
  1377. 1 max(sum(abs(x), axis=0))
  1378. -1 min(sum(abs(x), axis=0))
  1379. 2 2-norm (largest sing. value)
  1380. -2 smallest singular value
  1381. ===== ============================
  1382. inf means the numpy.inf object, and the Frobenius norm is
  1383. the root-of-sum-of-squares norm.
  1384. Returns
  1385. -------
  1386. c : {float, inf}
  1387. The condition number of the matrix. May be infinite.
  1388. See Also
  1389. --------
  1390. numpy.linalg.norm
  1391. Notes
  1392. -----
  1393. The condition number of `x` is defined as the norm of `x` times the
  1394. norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
  1395. (root-of-sum-of-squares) or one of a number of other matrix norms.
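As a quick sanity check of this definition (a minimal sketch; the matrix and
the norm order are arbitrary as long as the matrix is invertible)::

    a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    np.allclose(np.linalg.cond(a, 1),
                np.linalg.norm(a, 1) * np.linalg.norm(np.linalg.inv(a), 1))  # True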
  1396. References
  1397. ----------
  1398. .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
  1399. Academic Press, Inc., 1980, pg. 285.
  1400. Examples
  1401. --------
  1402. >>> from numpy import linalg as LA
  1403. >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
  1404. >>> a
  1405. array([[ 1, 0, -1],
  1406. [ 0, 1, 0],
  1407. [ 1, 0, 1]])
  1408. >>> LA.cond(a)
  1409. 1.4142135623730951
  1410. >>> LA.cond(a, 'fro')
  1411. 3.1622776601683795
  1412. >>> LA.cond(a, np.inf)
  1413. 2.0
  1414. >>> LA.cond(a, -np.inf)
  1415. 1.0
  1416. >>> LA.cond(a, 1)
  1417. 2.0
  1418. >>> LA.cond(a, -1)
  1419. 1.0
  1420. >>> LA.cond(a, 2)
  1421. 1.4142135623730951
  1422. >>> LA.cond(a, -2)
  1423. 0.70710678118654746 # may vary
  1424. >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
  1425. 0.70710678118654746 # may vary
  1426. """
  1427. x = asarray(x) # in case we have a matrix
  1428. if _is_empty_2d(x):
  1429. raise LinAlgError("cond is not defined on empty arrays")
  1430. if p is None or p == 2 or p == -2:
  1431. s = svd(x, compute_uv=False)
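# 2-norm condition number: ratio of the largest to the smallest singular
# value (inverted for p == -2); a singular matrix yields inf here, with the
# division warning suppressed by errstate below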
  1432. with errstate(all='ignore'):
  1433. if p == -2:
  1434. r = s[..., -1] / s[..., 0]
  1435. else:
  1436. r = s[..., 0] / s[..., -1]
  1437. else:
  1438. # Call inv(x) ignoring errors. The result array will
  1439. # contain nans in the entries where inversion failed.
  1440. _assert_stacked_2d(x)
  1441. _assert_stacked_square(x)
  1442. t, result_t = _commonType(x)
  1443. signature = 'D->D' if isComplexType(t) else 'd->d'
  1444. with errstate(all='ignore'):
  1445. invx = _umath_linalg.inv(x, signature=signature)
  1446. r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
  1447. r = r.astype(result_t, copy=False)
  1448. # Convert nans to infs unless the original array had nan entries
  1449. r = asarray(r)
  1450. nan_mask = isnan(r)
  1451. if nan_mask.any():
  1452. nan_mask &= ~isnan(x).any(axis=(-2, -1))
  1453. if r.ndim > 0:
  1454. r[nan_mask] = Inf
  1455. elif nan_mask:
  1456. r[()] = Inf
  1457. # Convention is to return scalars instead of 0d arrays
  1458. if r.ndim == 0:
  1459. r = r[()]
  1460. return r
  1461. def _matrix_rank_dispatcher(M, tol=None, hermitian=None):
  1462. return (M,)
  1463. @array_function_dispatch(_matrix_rank_dispatcher)
  1464. def matrix_rank(M, tol=None, hermitian=False):
  1465. """
  1466. Return matrix rank of array using SVD method
  1467. Rank of the array is the number of singular values of the array that are
  1468. greater than `tol`.
  1469. .. versionchanged:: 1.14
  1470. Can now operate on stacks of matrices
  1471. Parameters
  1472. ----------
  1473. M : {(M,), (..., M, N)} array_like
  1474. Input vector or stack of matrices.
  1475. tol : (...) array_like, float, optional
  1476. Threshold below which SVD values are considered zero. If `tol` is
  1477. None, and ``S`` is an array with singular values for `M`, and
  1478. ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
  1479. set to ``S.max() * max(M.shape) * eps``.
  1480. .. versionchanged:: 1.14
  1481. Broadcasted against the stack of matrices
  1482. hermitian : bool, optional
  1483. If True, `M` is assumed to be Hermitian (symmetric if real-valued),
  1484. enabling a more efficient method for finding singular values.
  1485. Defaults to False.
  1486. .. versionadded:: 1.14
  1487. Returns
  1488. -------
  1489. rank : (...) array_like
  1490. Rank of M.
  1491. Notes
  1492. -----
  1493. The default threshold to detect rank deficiency is a test on the magnitude
  1494. of the singular values of `M`. By default, we identify singular values less
  1495. than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
  1496. the symbols defined above). This is the algorithm MATLAB uses [1]. It also
  1497. appears in *Numerical recipes* in the discussion of SVD solutions for linear
  1498. least squares [2].
  1499. This default threshold is designed to detect rank deficiency accounting for
  1500. the numerical errors of the SVD computation. Imagine that there is a column
  1501. in `M` that is an exact (in floating point) linear combination of other
  1502. columns in `M`. Computing the SVD on `M` will not produce a singular value
  1503. exactly equal to 0 in general: any difference of the smallest SVD value from
  1504. 0 will be caused by numerical imprecision in the calculation of the SVD.
  1505. Our threshold for small SVD values takes this numerical imprecision into
  1506. account, and the default threshold will detect such numerical rank
  1507. deficiency. The threshold may declare a matrix `M` rank deficient even if
  1508. the linear combination of some columns of `M` is not exactly equal to
  1509. another column of `M` but only numerically very close to another column of
  1510. `M`.
  1511. We chose our default threshold because it is in wide use. Other thresholds
  1512. are possible. For example, elsewhere in the 2007 edition of *Numerical
  1513. recipes* there is an alternative threshold of ``S.max() *
  1514. np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
  1515. this threshold as being based on "expected roundoff error" (p 71).
  1516. The thresholds above deal with floating point roundoff error in the
  1517. calculation of the SVD. However, you may have more information about the
  1518. sources of error in `M` that would make you consider other tolerance values
  1519. to detect *effective* rank deficiency. The most useful measure of the
  1520. tolerance depends on the operations you intend to use on your matrix. For
  1521. example, if your data come from uncertain measurements with uncertainties
  1522. greater than floating point epsilon, choosing a tolerance near that
  1523. uncertainty may be preferable. The tolerance may be absolute if the
  1524. uncertainties are absolute rather than relative.
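For instance, the alternative *Numerical recipes* threshold mentioned above
can be passed in explicitly through `tol` (a minimal sketch, assuming `M` is
a 2-D floating-point array)::

    S = np.linalg.svd(M, compute_uv=False)
    m, n = M.shape
    tol = S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)
    rank = np.linalg.matrix_rank(M, tol=tol)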
  1525. References
  1526. ----------
.. [1] MATLAB reference documentation, "Rank"
  1528. https://www.mathworks.com/help/techdoc/ref/rank.html
  1529. .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
  1530. "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
  1531. page 795.
  1532. Examples
  1533. --------
  1534. >>> from numpy.linalg import matrix_rank
  1535. >>> matrix_rank(np.eye(4)) # Full rank matrix
  1536. 4
  1537. >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
  1538. >>> matrix_rank(I)
  1539. 3
  1540. >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
  1541. 1
  1542. >>> matrix_rank(np.zeros((4,)))
  1543. 0
  1544. """
  1545. M = asarray(M)
  1546. if M.ndim < 2:
  1547. return int(not all(M==0))
  1548. S = svd(M, compute_uv=False, hermitian=hermitian)
  1549. if tol is None:
  1550. tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
  1551. else:
  1552. tol = asarray(tol)[..., newaxis]
  1553. return count_nonzero(S > tol, axis=-1)
  1554. # Generalized inverse
  1555. def _pinv_dispatcher(a, rcond=None, hermitian=None):
  1556. return (a,)
  1557. @array_function_dispatch(_pinv_dispatcher)
  1558. def pinv(a, rcond=1e-15, hermitian=False):
  1559. """
  1560. Compute the (Moore-Penrose) pseudo-inverse of a matrix.
  1561. Calculate the generalized inverse of a matrix using its
  1562. singular-value decomposition (SVD) and including all
  1563. *large* singular values.
  1564. .. versionchanged:: 1.14
  1565. Can now operate on stacks of matrices
  1566. Parameters
  1567. ----------
  1568. a : (..., M, N) array_like
  1569. Matrix or stack of matrices to be pseudo-inverted.
  1570. rcond : (...) array_like of float
  1571. Cutoff for small singular values.
  1572. Singular values less than or equal to
  1573. ``rcond * largest_singular_value`` are set to zero.
  1574. Broadcasts against the stack of matrices.
  1575. hermitian : bool, optional
  1576. If True, `a` is assumed to be Hermitian (symmetric if real-valued),
  1577. enabling a more efficient method for finding singular values.
  1578. Defaults to False.
  1579. .. versionadded:: 1.17.0
  1580. Returns
  1581. -------
  1582. B : (..., N, M) ndarray
  1583. The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
  1584. is `B`.
  1585. Raises
  1586. ------
  1587. LinAlgError
  1588. If the SVD computation does not converge.
  1589. See Also
  1590. --------
  1591. scipy.linalg.pinv : Similar function in SciPy.
  1592. scipy.linalg.pinv2 : Similar function in SciPy (SVD-based).
  1593. scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
  1594. Hermitian matrix.
  1595. Notes
  1596. -----
  1597. The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
  1598. defined as: "the matrix that 'solves' [the least-squares problem]
  1599. :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
  1600. :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
  1601. It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
  1602. value decomposition of A, then
  1603. :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
  1604. orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
  1605. of A's so-called singular values, (followed, typically, by
  1606. zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
  1607. consisting of the reciprocals of A's singular values
  1608. (again, followed by zeros). [1]_
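For a 2-D array this construction can be reproduced with `svd` (a rough
sketch of the same idea rather than the exact implementation; the cutoff
mirrors the default `rcond`)::

    u, s, vt = np.linalg.svd(a, full_matrices=False)
    large = s > 1e-15 * s.max()
    s_inv = np.divide(1, s, out=np.zeros_like(s), where=large)
    B = vt.conj().T @ (s_inv[:, np.newaxis] * u.conj().T)
    np.allclose(B, np.linalg.pinv(a))    # True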
  1609. References
  1610. ----------
  1611. .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
  1612. FL, Academic Press, Inc., 1980, pp. 139-142.
  1613. Examples
  1614. --------
  1615. The following example checks that ``a * a+ * a == a`` and
  1616. ``a+ * a * a+ == a+``:
  1617. >>> a = np.random.randn(9, 6)
  1618. >>> B = np.linalg.pinv(a)
  1619. >>> np.allclose(a, np.dot(a, np.dot(B, a)))
  1620. True
  1621. >>> np.allclose(B, np.dot(B, np.dot(a, B)))
  1622. True
  1623. """
  1624. a, wrap = _makearray(a)
  1625. rcond = asarray(rcond)
  1626. if _is_empty_2d(a):
  1627. m, n = a.shape[-2:]
  1628. res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
  1629. return wrap(res)
  1630. a = a.conjugate()
  1631. u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
  1632. # discard small singular values
  1633. cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
  1634. large = s > cutoff
  1635. s = divide(1, s, where=large, out=s)
  1636. s[~large] = 0
  1637. res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
  1638. return wrap(res)
  1639. # Determinant
  1640. @array_function_dispatch(_unary_dispatcher)
  1641. def slogdet(a):
  1642. """
  1643. Compute the sign and (natural) logarithm of the determinant of an array.
  1644. If an array has a very small or very large determinant, then a call to
  1645. `det` may overflow or underflow. This routine is more robust against such
  1646. issues, because it computes the logarithm of the determinant rather than
  1647. the determinant itself.
  1648. Parameters
  1649. ----------
  1650. a : (..., M, M) array_like
  1651. Input array, has to be a square 2-D array.
  1652. Returns
  1653. -------
  1654. sign : (...) array_like
  1655. A number representing the sign of the determinant. For a real matrix,
  1656. this is 1, 0, or -1. For a complex matrix, this is a complex number
  1657. with absolute value 1 (i.e., it is on the unit circle), or else 0.
  1658. logdet : (...) array_like
  1659. The natural log of the absolute value of the determinant.
  1660. If the determinant is zero, then `sign` will be 0 and `logdet` will be
  1661. -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
  1662. See Also
  1663. --------
  1664. det
  1665. Notes
  1666. -----
  1667. .. versionadded:: 1.8.0
  1668. Broadcasting rules apply, see the `numpy.linalg` documentation for
  1669. details.
  1670. .. versionadded:: 1.6.0
  1671. The determinant is computed via LU factorization using the LAPACK
  1672. routine ``z/dgetrf``.
  1673. Examples
  1674. --------
  1675. The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
  1676. >>> a = np.array([[1, 2], [3, 4]])
  1677. >>> (sign, logdet) = np.linalg.slogdet(a)
  1678. >>> (sign, logdet)
  1679. (-1, 0.69314718055994529) # may vary
  1680. >>> sign * np.exp(logdet)
  1681. -2.0
  1682. Computing log-determinants for a stack of matrices:
  1683. >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
  1684. >>> a.shape
  1685. (3, 2, 2)
  1686. >>> sign, logdet = np.linalg.slogdet(a)
  1687. >>> (sign, logdet)
  1688. (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
  1689. >>> sign * np.exp(logdet)
  1690. array([-2., -3., -8.])
  1691. This routine succeeds where ordinary `det` does not:
  1692. >>> np.linalg.det(np.eye(500) * 0.1)
  1693. 0.0
  1694. >>> np.linalg.slogdet(np.eye(500) * 0.1)
  1695. (1, -1151.2925464970228)
  1696. """
  1697. a = asarray(a)
  1698. _assert_stacked_2d(a)
  1699. _assert_stacked_square(a)
  1700. t, result_t = _commonType(a)
  1701. real_t = _realType(result_t)
  1702. signature = 'D->Dd' if isComplexType(t) else 'd->dd'
  1703. sign, logdet = _umath_linalg.slogdet(a, signature=signature)
  1704. sign = sign.astype(result_t, copy=False)
  1705. logdet = logdet.astype(real_t, copy=False)
  1706. return sign, logdet
  1707. @array_function_dispatch(_unary_dispatcher)
  1708. def det(a):
  1709. """
  1710. Compute the determinant of an array.
  1711. Parameters
  1712. ----------
  1713. a : (..., M, M) array_like
  1714. Input array to compute determinants for.
  1715. Returns
  1716. -------
  1717. det : (...) array_like
  1718. Determinant of `a`.
  1719. See Also
  1720. --------
  1721. slogdet : Another way to represent the determinant, more suitable
  1722. for large matrices where underflow/overflow may occur.
  1723. scipy.linalg.det : Similar function in SciPy.
  1724. Notes
  1725. -----
  1726. .. versionadded:: 1.8.0
  1727. Broadcasting rules apply, see the `numpy.linalg` documentation for
  1728. details.
  1729. The determinant is computed via LU factorization using the LAPACK
  1730. routine ``z/dgetrf``.
  1731. Examples
  1732. --------
  1733. The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
  1734. >>> a = np.array([[1, 2], [3, 4]])
  1735. >>> np.linalg.det(a)
  1736. -2.0 # may vary
  1737. Computing determinants for a stack of matrices:
  1738. >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
  1739. >>> a.shape
  1740. (3, 2, 2)
  1741. >>> np.linalg.det(a)
  1742. array([-2., -3., -8.])
  1743. """
  1744. a = asarray(a)
  1745. _assert_stacked_2d(a)
  1746. _assert_stacked_square(a)
  1747. t, result_t = _commonType(a)
  1748. signature = 'D->D' if isComplexType(t) else 'd->d'
  1749. r = _umath_linalg.det(a, signature=signature)
  1750. r = r.astype(result_t, copy=False)
  1751. return r
  1752. # Linear Least Squares
  1753. def _lstsq_dispatcher(a, b, rcond=None):
  1754. return (a, b)
  1755. @array_function_dispatch(_lstsq_dispatcher)
  1756. def lstsq(a, b, rcond="warn"):
  1757. r"""
  1758. Return the least-squares solution to a linear matrix equation.
Computes the vector x that approximately solves the equation
  1760. ``a @ x = b``. The equation may be under-, well-, or over-determined
  1761. (i.e., the number of linearly independent rows of `a` can be less than,
  1762. equal to, or greater than its number of linearly independent columns).
  1763. If `a` is square and of full rank, then `x` (but for round-off error)
  1764. is the "exact" solution of the equation. Else, `x` minimizes the
  1765. Euclidean 2-norm :math:`|| b - a x ||`.
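Equivalently, the least-squares solution can be written in terms of the
pseudo-inverse (a minimal sketch; `A` and `y` as constructed in the Examples
below)::

    x = np.linalg.lstsq(A, y, rcond=None)[0]
    np.allclose(x, np.linalg.pinv(A) @ y)    # True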
  1766. Parameters
  1767. ----------
  1768. a : (M, N) array_like
  1769. "Coefficient" matrix.
  1770. b : {(M,), (M, K)} array_like
  1771. Ordinate or "dependent variable" values. If `b` is two-dimensional,
  1772. the least-squares solution is calculated for each of the `K` columns
  1773. of `b`.
  1774. rcond : float, optional
  1775. Cut-off ratio for small singular values of `a`.
  1776. For the purposes of rank determination, singular values are treated
  1777. as zero if they are smaller than `rcond` times the largest singular
  1778. value of `a`.
  1779. .. versionchanged:: 1.14.0
  1780. If not set, a FutureWarning is given. The previous default
  1781. of ``-1`` will use the machine precision as `rcond` parameter,
  1782. the new default will use the machine precision times `max(M, N)`.
  1783. To silence the warning and use the new default, use ``rcond=None``,
  1784. to keep using the old behavior, use ``rcond=-1``.
  1785. Returns
  1786. -------
  1787. x : {(N,), (N, K)} ndarray
  1788. Least-squares solution. If `b` is two-dimensional,
  1789. the solutions are in the `K` columns of `x`.
  1790. residuals : {(1,), (K,), (0,)} ndarray
  1791. Sums of residuals; squared Euclidean 2-norm for each column in
  1792. ``b - a*x``.
  1793. If the rank of `a` is < N or M <= N, this is an empty array.
  1794. If `b` is 1-dimensional, this is a (1,) shape array.
  1795. Otherwise the shape is (K,).
  1796. rank : int
  1797. Rank of matrix `a`.
  1798. s : (min(M, N),) ndarray
  1799. Singular values of `a`.
  1800. Raises
  1801. ------
  1802. LinAlgError
  1803. If computation does not converge.
  1804. See Also
  1805. --------
  1806. scipy.linalg.lstsq : Similar function in SciPy.
  1807. Notes
  1808. -----
  1809. If `b` is a matrix, then all array results are returned as matrices.
  1810. Examples
  1811. --------
  1812. Fit a line, ``y = mx + c``, through some noisy data-points:
  1813. >>> x = np.array([0, 1, 2, 3])
  1814. >>> y = np.array([-1, 0.2, 0.9, 2.1])
  1815. By examining the coefficients, we see that the line should have a
  1816. gradient of roughly 1 and cut the y-axis at, more or less, -1.
  1817. We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
  1818. and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
  1819. >>> A = np.vstack([x, np.ones(len(x))]).T
  1820. >>> A
  1821. array([[ 0., 1.],
  1822. [ 1., 1.],
  1823. [ 2., 1.],
  1824. [ 3., 1.]])
  1825. >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
  1826. >>> m, c
(1.0, -0.95) # may vary
  1828. Plot the data along with the fitted line:
  1829. >>> import matplotlib.pyplot as plt
  1830. >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
  1831. >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
  1832. >>> _ = plt.legend()
  1833. >>> plt.show()
  1834. """
  1835. a, _ = _makearray(a)
  1836. b, wrap = _makearray(b)
  1837. is_1d = b.ndim == 1
  1838. if is_1d:
  1839. b = b[:, newaxis]
  1840. _assert_2d(a, b)
  1841. m, n = a.shape[-2:]
  1842. m2, n_rhs = b.shape[-2:]
  1843. if m != m2:
  1844. raise LinAlgError('Incompatible dimensions')
  1845. t, result_t = _commonType(a, b)
  1846. # FIXME: real_t is unused
  1847. real_t = _linalgRealType(t)
  1848. result_real_t = _realType(result_t)
  1849. # Determine default rcond value
  1850. if rcond == "warn":
  1851. # 2017-08-19, 1.14.0
  1852. warnings.warn("`rcond` parameter will change to the default of "
  1853. "machine precision times ``max(M, N)`` where M and N "
  1854. "are the input matrix dimensions.\n"
  1855. "To use the future default and silence this warning "
  1856. "we advise to pass `rcond=None`, to keep using the old, "
  1857. "explicitly pass `rcond=-1`.",
  1858. FutureWarning, stacklevel=3)
  1859. rcond = -1
  1860. if rcond is None:
  1861. rcond = finfo(t).eps * max(n, m)
  1862. if m <= n:
  1863. gufunc = _umath_linalg.lstsq_m
  1864. else:
  1865. gufunc = _umath_linalg.lstsq_n
  1866. signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
  1867. extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
  1868. if n_rhs == 0:
# LAPACK can't handle n_rhs = 0, so allocate the array one larger in that axis
  1870. b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
  1871. x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
  1872. if m == 0:
  1873. x[...] = 0
  1874. if n_rhs == 0:
  1875. # remove the item we added
  1876. x = x[..., :n_rhs]
  1877. resids = resids[..., :n_rhs]
  1878. # remove the axis we added
  1879. if is_1d:
  1880. x = x.squeeze(axis=-1)
  1881. # we probably should squeeze resids too, but we can't
  1882. # without breaking compatibility.
  1883. # as documented
  1884. if rank != n or m <= n:
  1885. resids = array([], result_real_t)
  1886. # coerce output arrays
  1887. s = s.astype(result_real_t, copy=False)
  1888. resids = resids.astype(result_real_t, copy=False)
  1889. x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
  1890. return wrap(x), wrap(resids), rank, s
  1891. def _multi_svd_norm(x, row_axis, col_axis, op):
  1892. """Compute a function of the singular values of the 2-D matrices in `x`.
  1893. This is a private utility function used by `numpy.linalg.norm()`.
  1894. Parameters
  1895. ----------
  1896. x : ndarray
  1897. row_axis, col_axis : int
  1898. The axes of `x` that hold the 2-D matrices.
  1899. op : callable
This should be either `numpy.amin`, `numpy.amax` or `numpy.sum`.
  1901. Returns
  1902. -------
  1903. result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are the minimum, maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin`, `numpy.amax` or `numpy.sum`.
  1909. """
  1910. y = moveaxis(x, (row_axis, col_axis), (-2, -1))
  1911. result = op(svd(y, compute_uv=False), axis=-1)
  1912. return result
  1913. def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
  1914. return (x,)
  1915. @array_function_dispatch(_norm_dispatcher)
  1916. def norm(x, ord=None, axis=None, keepdims=False):
  1917. """
  1918. Matrix or vector norm.
  1919. This function is able to return one of eight different matrix norms,
  1920. or one of an infinite number of vector norms (described below), depending
  1921. on the value of the ``ord`` parameter.
  1922. Parameters
  1923. ----------
  1924. x : array_like
  1925. Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
  1926. is None. If both `axis` and `ord` are None, the 2-norm of
  1927. ``x.ravel`` will be returned.
  1928. ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
  1929. Order of the norm (see table under ``Notes``). inf means numpy's
  1930. `inf` object. The default is None.
  1931. axis : {None, int, 2-tuple of ints}, optional.
  1932. If `axis` is an integer, it specifies the axis of `x` along which to
  1933. compute the vector norms. If `axis` is a 2-tuple, it specifies the
  1934. axes that hold 2-D matrices, and the matrix norms of these matrices
  1935. are computed. If `axis` is None then either a vector norm (when `x`
  1936. is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
  1937. is None.
  1938. .. versionadded:: 1.8.0
  1939. keepdims : bool, optional
  1940. If this is set to True, the axes which are normed over are left in the
  1941. result as dimensions with size one. With this option the result will
  1942. broadcast correctly against the original `x`.
  1943. .. versionadded:: 1.10.0
  1944. Returns
  1945. -------
  1946. n : float or ndarray
  1947. Norm of the matrix or vector(s).
  1948. See Also
  1949. --------
  1950. scipy.linalg.norm : Similar function in SciPy.
  1951. Notes
  1952. -----
  1953. For values of ``ord < 1``, the result is, strictly speaking, not a
  1954. mathematical 'norm', but it may still be useful for various numerical
  1955. purposes.
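For example, ``ord=0.5`` violates the triangle inequality (a small
illustration)::

    x, y = np.array([1., 0.]), np.array([0., 1.])
    np.linalg.norm(x + y, 0.5)                            # 4.0
    np.linalg.norm(x, 0.5) + np.linalg.norm(y, 0.5)       # 2.0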
  1956. The following norms can be calculated:
  1957. ===== ============================ ==========================
  1958. ord norm for matrices norm for vectors
  1959. ===== ============================ ==========================
  1960. None Frobenius norm 2-norm
  1961. 'fro' Frobenius norm --
  1962. 'nuc' nuclear norm --
  1963. inf max(sum(abs(x), axis=1)) max(abs(x))
  1964. -inf min(sum(abs(x), axis=1)) min(abs(x))
  1965. 0 -- sum(x != 0)
  1966. 1 max(sum(abs(x), axis=0)) as below
  1967. -1 min(sum(abs(x), axis=0)) as below
  1968. 2 2-norm (largest sing. value) as below
  1969. -2 smallest singular value as below
  1970. other -- sum(abs(x)**ord)**(1./ord)
  1971. ===== ============================ ==========================
  1972. The Frobenius norm is given by [1]_:
  1973. :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
  1974. The nuclear norm is the sum of the singular values.
  1975. Both the Frobenius and nuclear norm orders are only defined for
  1976. matrices and raise a ValueError when ``x.ndim != 2``.
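The nuclear norm can be checked against the singular values directly (a
minimal sketch for an arbitrary 2-D array ``b``)::

    np.allclose(np.linalg.norm(b, 'nuc'),
                np.linalg.svd(b, compute_uv=False).sum())    # True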
  1977. References
  1978. ----------
  1979. .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
  1980. Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
  1981. Examples
  1982. --------
  1983. >>> from numpy import linalg as LA
  1984. >>> a = np.arange(9) - 4
  1985. >>> a
  1986. array([-4, -3, -2, ..., 2, 3, 4])
  1987. >>> b = a.reshape((3, 3))
  1988. >>> b
  1989. array([[-4, -3, -2],
  1990. [-1, 0, 1],
  1991. [ 2, 3, 4]])
  1992. >>> LA.norm(a)
  1993. 7.745966692414834
  1994. >>> LA.norm(b)
  1995. 7.745966692414834
  1996. >>> LA.norm(b, 'fro')
  1997. 7.745966692414834
  1998. >>> LA.norm(a, np.inf)
  1999. 4.0
  2000. >>> LA.norm(b, np.inf)
  2001. 9.0
  2002. >>> LA.norm(a, -np.inf)
  2003. 0.0
  2004. >>> LA.norm(b, -np.inf)
  2005. 2.0
  2006. >>> LA.norm(a, 1)
  2007. 20.0
  2008. >>> LA.norm(b, 1)
  2009. 7.0
  2010. >>> LA.norm(a, -1)
  2011. -4.6566128774142013e-010
  2012. >>> LA.norm(b, -1)
  2013. 6.0
  2014. >>> LA.norm(a, 2)
  2015. 7.745966692414834
  2016. >>> LA.norm(b, 2)
  2017. 7.3484692283495345
  2018. >>> LA.norm(a, -2)
  2019. 0.0
  2020. >>> LA.norm(b, -2)
  2021. 1.8570331885190563e-016 # may vary
  2022. >>> LA.norm(a, 3)
  2023. 5.8480354764257312 # may vary
  2024. >>> LA.norm(a, -3)
  2025. 0.0
  2026. Using the `axis` argument to compute vector norms:
  2027. >>> c = np.array([[ 1, 2, 3],
  2028. ... [-1, 1, 4]])
  2029. >>> LA.norm(c, axis=0)
  2030. array([ 1.41421356, 2.23606798, 5. ])
  2031. >>> LA.norm(c, axis=1)
  2032. array([ 3.74165739, 4.24264069])
  2033. >>> LA.norm(c, ord=1, axis=1)
  2034. array([ 6., 6.])
  2035. Using the `axis` argument to compute matrix norms:
  2036. >>> m = np.arange(8).reshape(2,2,2)
  2037. >>> LA.norm(m, axis=(1,2))
  2038. array([ 3.74165739, 11.22497216])
  2039. >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
  2040. (3.7416573867739413, 11.224972160321824)
  2041. """
  2042. x = asarray(x)
  2043. if not issubclass(x.dtype.type, (inexact, object_)):
  2044. x = x.astype(float)
  2045. # Immediately handle some default, simple, fast, and common cases.
  2046. if axis is None:
  2047. ndim = x.ndim
  2048. if ((ord is None) or
  2049. (ord in ('f', 'fro') and ndim == 2) or
  2050. (ord == 2 and ndim == 1)):
  2051. x = x.ravel(order='K')
  2052. if isComplexType(x.dtype.type):
  2053. sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
  2054. else:
  2055. sqnorm = dot(x, x)
  2056. ret = sqrt(sqnorm)
  2057. if keepdims:
  2058. ret = ret.reshape(ndim*[1])
  2059. return ret
  2060. # Normalize the `axis` argument to a tuple.
  2061. nd = x.ndim
  2062. if axis is None:
  2063. axis = tuple(range(nd))
  2064. elif not isinstance(axis, tuple):
  2065. try:
  2066. axis = int(axis)
  2067. except Exception as e:
  2068. raise TypeError("'axis' must be None, an integer or a tuple of integers") from e
  2069. axis = (axis,)
  2070. if len(axis) == 1:
  2071. if ord == Inf:
  2072. return abs(x).max(axis=axis, keepdims=keepdims)
  2073. elif ord == -Inf:
  2074. return abs(x).min(axis=axis, keepdims=keepdims)
  2075. elif ord == 0:
  2076. # Zero norm
  2077. return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
  2078. elif ord == 1:
  2079. # special case for speedup
  2080. return add.reduce(abs(x), axis=axis, keepdims=keepdims)
  2081. elif ord is None or ord == 2:
  2082. # special case for speedup
  2083. s = (x.conj() * x).real
  2084. return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
  2085. # None of the str-type keywords for ord ('fro', 'nuc')
  2086. # are valid for vectors
  2087. elif isinstance(ord, str):
  2088. raise ValueError(f"Invalid norm order '{ord}' for vectors")
  2089. else:
  2090. absx = abs(x)
  2091. absx **= ord
  2092. ret = add.reduce(absx, axis=axis, keepdims=keepdims)
  2093. ret **= (1 / ord)
  2094. return ret
  2095. elif len(axis) == 2:
  2096. row_axis, col_axis = axis
  2097. row_axis = normalize_axis_index(row_axis, nd)
  2098. col_axis = normalize_axis_index(col_axis, nd)
  2099. if row_axis == col_axis:
  2100. raise ValueError('Duplicate axes given.')
  2101. if ord == 2:
  2102. ret = _multi_svd_norm(x, row_axis, col_axis, amax)
  2103. elif ord == -2:
  2104. ret = _multi_svd_norm(x, row_axis, col_axis, amin)
  2105. elif ord == 1:
  2106. if col_axis > row_axis:
  2107. col_axis -= 1
  2108. ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
  2109. elif ord == Inf:
  2110. if row_axis > col_axis:
  2111. row_axis -= 1
  2112. ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
  2113. elif ord == -1:
  2114. if col_axis > row_axis:
  2115. col_axis -= 1
  2116. ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
  2117. elif ord == -Inf:
  2118. if row_axis > col_axis:
  2119. row_axis -= 1
  2120. ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
  2121. elif ord in [None, 'fro', 'f']:
  2122. ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
  2123. elif ord == 'nuc':
  2124. ret = _multi_svd_norm(x, row_axis, col_axis, sum)
  2125. else:
  2126. raise ValueError("Invalid norm order for matrices.")
  2127. if keepdims:
  2128. ret_shape = list(x.shape)
  2129. ret_shape[axis[0]] = 1
  2130. ret_shape[axis[1]] = 1
  2131. ret = ret.reshape(ret_shape)
  2132. return ret
  2133. else:
  2134. raise ValueError("Improper number of dimensions to norm.")
  2135. # multi_dot
  2136. def _multidot_dispatcher(arrays, *, out=None):
  2137. yield from arrays
  2138. yield out
  2139. @array_function_dispatch(_multidot_dispatcher)
  2140. def multi_dot(arrays, *, out=None):
  2141. """
  2142. Compute the dot product of two or more arrays in a single function call,
  2143. while automatically selecting the fastest evaluation order.
  2144. `multi_dot` chains `numpy.dot` and uses optimal parenthesization
  2145. of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
  2146. this can speed up the multiplication a lot.
  2147. If the first argument is 1-D it is treated as a row vector.
  2148. If the last argument is 1-D it is treated as a column vector.
  2149. The other arguments must be 2-D.
  2150. Think of `multi_dot` as::
  2151. def multi_dot(arrays): return functools.reduce(np.dot, arrays)
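The 1-D handling described above means that a vector-matrix-vector chain
collapses to a scalar (a small sketch with made-up shapes)::

    from numpy.linalg import multi_dot
    v, A, w = np.ones(3), np.ones((3, 4)), np.ones(4)
    multi_dot([v, A, w])    # scalar, equal to v @ A @ w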
  2152. Parameters
  2153. ----------
  2154. arrays : sequence of array_like
  2155. If the first argument is 1-D it is treated as row vector.
  2156. If the last argument is 1-D it is treated as column vector.
  2157. The other arguments must be 2-D.
  2158. out : ndarray, optional
  2159. Output argument. This must have the exact kind that would be returned
  2160. if it was not used. In particular, it must have the right type, must be
  2161. C-contiguous, and its dtype must be the dtype that would be returned
  2162. for `dot(a, b)`. This is a performance feature. Therefore, if these
  2163. conditions are not met, an exception is raised, instead of attempting
  2164. to be flexible.
  2165. .. versionadded:: 1.19.0
  2166. Returns
  2167. -------
  2168. output : ndarray
  2169. Returns the dot product of the supplied arrays.
  2170. See Also
  2171. --------
  2172. dot : dot multiplication with two arguments.
  2173. References
  2174. ----------
  2175. .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
  2176. .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
  2177. Examples
  2178. --------
  2179. `multi_dot` allows you to write::
  2180. >>> from numpy.linalg import multi_dot
  2181. >>> # Prepare some data
  2182. >>> A = np.random.random((10000, 100))
  2183. >>> B = np.random.random((100, 1000))
  2184. >>> C = np.random.random((1000, 5))
  2185. >>> D = np.random.random((5, 333))
  2186. >>> # the actual dot multiplication
  2187. >>> _ = multi_dot([A, B, C, D])
  2188. instead of::
  2189. >>> _ = np.dot(np.dot(np.dot(A, B), C), D)
  2190. >>> # or
  2191. >>> _ = A.dot(B).dot(C).dot(D)
  2192. Notes
  2193. -----
  2194. The cost for a matrix multiplication can be calculated with the
  2195. following function::
    def cost(A, B):
        return A.shape[0] * A.shape[1] * B.shape[1]
  2198. Assume we have three matrices
  2199. :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
  2200. The costs for the two different parenthesizations are as follows::
    cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
    cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
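These numbers can be reproduced with the ``cost`` helper above (a minimal
sketch; only the shapes matter)::

    A, B, C = np.empty((10, 100)), np.empty((100, 5)), np.empty((5, 50))
    cost(A, B) + cost(A @ B, C)    # 7500
    cost(B, C) + cost(A, B @ C)    # 75000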
  2203. """
  2204. n = len(arrays)
  2205. # optimization only makes sense for len(arrays) > 2
  2206. if n < 2:
  2207. raise ValueError("Expecting at least two arrays.")
  2208. elif n == 2:
  2209. return dot(arrays[0], arrays[1], out=out)
  2210. arrays = [asanyarray(a) for a in arrays]
  2211. # save original ndim to reshape the result array into the proper form later
  2212. ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
  2213. # Explicitly convert vectors to 2D arrays to keep the logic of the internal
  2214. # _multi_dot_* functions as simple as possible.
  2215. if arrays[0].ndim == 1:
  2216. arrays[0] = atleast_2d(arrays[0])
  2217. if arrays[-1].ndim == 1:
  2218. arrays[-1] = atleast_2d(arrays[-1]).T
  2219. _assert_2d(*arrays)
  2220. # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
  2221. if n == 3:
  2222. result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
  2223. else:
  2224. order = _multi_dot_matrix_chain_order(arrays)
  2225. result = _multi_dot(arrays, order, 0, n - 1, out=out)
  2226. # return proper shape
  2227. if ndim_first == 1 and ndim_last == 1:
  2228. return result[0, 0] # scalar
  2229. elif ndim_first == 1 or ndim_last == 1:
  2230. return result.ravel() # 1-D
  2231. else:
  2232. return result
  2233. def _multi_dot_three(A, B, C, out=None):
  2234. """
  2235. Find the best order for three arrays and do the multiplication.
  2236. For three arguments `_multi_dot_three` is approximately 15 times faster
  2237. than `_multi_dot_matrix_chain_order`
  2238. """
  2239. a0, a1b0 = A.shape
  2240. b1c0, c1 = C.shape
  2241. # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
  2242. cost1 = a0 * b1c0 * (a1b0 + c1)
  2243. # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
  2244. cost2 = a1b0 * c1 * (a0 + b1c0)
  2245. if cost1 < cost2:
  2246. return dot(dot(A, B), C, out=out)
  2247. else:
  2248. return dot(A, dot(B, C), out=out)
  2249. def _multi_dot_matrix_chain_order(arrays, return_costs=False):
  2250. """
Return an np.array that encodes the optimal order of multiplications.
  2252. The optimal order array is then used by `_multi_dot()` to do the
  2253. multiplication.
  2254. Also return the cost matrix if `return_costs` is `True`
  2255. The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
  2256. Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
  2257. cost[i, j] = min([
  2258. cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
  2259. for k in range(i, j)])
  2260. """
  2261. n = len(arrays)
  2262. # p stores the dimensions of the matrices
  2263. # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
  2264. p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
  2265. # m is a matrix of costs of the subproblems
  2266. # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
  2267. m = zeros((n, n), dtype=double)
  2268. # s is the actual ordering
  2269. # s[i, j] is the value of k at which we split the product A_i..A_j
  2270. s = empty((n, n), dtype=intp)
  2271. for l in range(1, n):
  2272. for i in range(n - l):
  2273. j = i + l
  2274. m[i, j] = Inf
  2275. for k in range(i, j):
  2276. q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
  2277. if q < m[i, j]:
  2278. m[i, j] = q
  2279. s[i, j] = k # Note that Cormen uses 1-based index
  2280. return (s, m) if return_costs else s
  2281. def _multi_dot(arrays, order, i, j, out=None):
  2282. """Actually do the multiplication with the given order."""
  2283. if i == j:
  2284. # the initial call with non-None out should never get here
  2285. assert out is None
  2286. return arrays[i]
  2287. else:
  2288. return dot(_multi_dot(arrays, order, i, order[i, j]),
  2289. _multi_dot(arrays, order, order[i, j] + 1, j),
  2290. out=out)