from types import FunctionType
from collections import Counter

from mpmath import mp, workprec
from mpmath.libmp.libmpf import prec_to_dps

from sympy.core.compatibility import default_sort_key
from sympy.core.evalf import DEFAULT_MAXPREC, PrecisionExhausted
from sympy.core.logic import fuzzy_and, fuzzy_or
from sympy.core.numbers import Float
from sympy.core.sympify import _sympify
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys import roots, CRootOf, ZZ, QQ, EX
from sympy.polys.matrices import DomainMatrix
from sympy.polys.matrices.eigen import dom_eigenvects, dom_eigenvects_to_sympy
from sympy.simplify import nsimplify, simplify as _simplify
from sympy.utilities.exceptions import SymPyDeprecationWarning

from .common import MatrixError, NonSquareMatrixError
from .determinant import _find_reasonable_pivot

from .utilities import _iszero


def _eigenvals_eigenvects_mpmath(M):
    norm2 = lambda v: mp.sqrt(sum(i**2 for i in v))

    v1 = None
    prec = max([x._prec for x in M.atoms(Float)])
    eps = 2**-prec
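    # Double the working precision until two consecutive passes of mp.eig
    # agree: v2 below is the 2-norm of the stacked real/imaginary parts of
    # the computed eigenvalues, and convergence is declared once it changes
    # by less than the precision of the original Floats in M.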

    while prec < DEFAULT_MAXPREC:
        with workprec(prec):
            A = mp.matrix(M.evalf(n=prec_to_dps(prec)))
            E, ER = mp.eig(A)
            v2 = norm2([i for e in E for i in (mp.re(e), mp.im(e))])
            if v1 is not None and mp.fabs(v1 - v2) < eps:
                return E, ER
            v1 = v2
        prec *= 2

    # we get here because the next step would have taken us
    # past MAXPREC or because we never took a step; in case
    # of the latter, we refuse to send back a solution since
    # it would not have been verified; we also resist taking
    # a small step to arrive exactly at MAXPREC since then
    # the two calculations might be artificially close.
    raise PrecisionExhausted


def _eigenvals_mpmath(M, multiple=False):
    """Compute eigenvalues using mpmath"""
    E, _ = _eigenvals_eigenvects_mpmath(M)
    result = [_sympify(x) for x in E]
    if multiple:
        return result
    return dict(Counter(result))


def _eigenvects_mpmath(M):
    E, ER = _eigenvals_eigenvects_mpmath(M)
    result = []
    for i in range(M.rows):
        eigenval = _sympify(E[i])
        eigenvect = _sympify(ER[:, i])
        result.append((eigenval, 1, [eigenvect]))

    return result


# This function is a candidate for caching if it gets implemented for matrices.
def _eigenvals(
    M, error_when_incomplete=True, *, simplify=False, multiple=False,
    rational=False, **flags):
    r"""Compute eigenvalues of the matrix.

    Parameters
    ==========

    error_when_incomplete : bool, optional
        If it is set to ``True``, it will raise an error if not all
        eigenvalues are computed. This is caused by ``roots`` not returning
        a full list of eigenvalues.

    simplify : bool or function, optional
        If it is set to ``True``, it attempts to return the most
        simplified form of expressions returned by applying the default
        simplification method in every routine.

        If it is set to ``False``, it will skip simplification in this
        particular routine to save computation resources.

        If a function is passed, it will attempt to apply
        that function as the simplification method.

    rational : bool, optional
        If it is set to ``True``, every floating point number will be
        replaced with rationals before computation. This can work around
        some issues with the ``roots`` routine not handling floats well.

    multiple : bool, optional
        If it is set to ``True``, the result will be in the form of a
        list.

        If it is set to ``False``, the result will be in the form of a
        dictionary.

    Returns
    =======

    eigs : list or dict
        Eigenvalues of a matrix. The return format is specified by the
        keyword ``multiple``.

    Raises
    ======

    MatrixError
        If not all eigenvalues could be computed.

    NonSquareMatrixError
        If the matrix is not square.

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
    >>> M.eigenvals()
    {-1: 1, 0: 1, 2: 1}
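
    With ``multiple=True`` the eigenvalues are returned as a list instead
    (the ordering is not guaranteed, so it is sorted here):

    >>> sorted(M.eigenvals(multiple=True))
    [-1, 0, 2]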

    See Also
    ========

    MatrixDeterminant.charpoly
    eigenvects

    Notes
    =====

    Eigenvalues of a matrix $A$ can be computed by solving the matrix
    equation $\det(A - \lambda I) = 0$.

    It is not always possible to return radical solutions for
    eigenvalues of matrices larger than $4 \times 4$ due to the
    Abel-Ruffini theorem.

    If no radical solution is found for an eigenvalue, it may be
    returned in the form of
    :class:`sympy.polys.rootoftools.ComplexRootOf`.
    """
    if not M:
        if multiple:
            return []
        return {}

    if not M.is_square:
        raise NonSquareMatrixError("{} must be a square matrix.".format(M))

    if M._rep.domain not in (ZZ, QQ):
        # Skip this check for ZZ/QQ because it can be slow
        if all(x.is_number for x in M) and M.has(Float):
            return _eigenvals_mpmath(M, multiple=multiple)

    if rational:
        M = M.applyfunc(
            lambda x: nsimplify(x, rational=True) if x.has(Float) else x)

    if multiple:
        return _eigenvals_list(
            M, error_when_incomplete=error_when_incomplete, simplify=simplify,
            **flags)
    return _eigenvals_dict(
        M, error_when_incomplete=error_when_incomplete, simplify=simplify,
        **flags)


eigenvals_error_message = \
"It is not always possible to express the eigenvalues of a matrix " + \
"of size 5x5 or higher in radicals. " + \
"We have CRootOf, but domains other than the rationals are not " + \
"currently supported. " + \
"If there are no symbols in the matrix, " + \
"it should still be possible to compute numeric approximations " + \
"of the eigenvalues using " + \
"M.evalf().eigenvals() or M.charpoly().nroots()."
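
# Both helpers below first split the matrix into strongly connected blocks
# (a permutation brings such a matrix to block-triangular form), so the
# eigenvalues of M are the union of the eigenvalues of the diagonal blocks;
# each block's characteristic polynomial is then handed to ``roots``, with
# ``CRootOf`` as a fallback when no closed form is available.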


def _eigenvals_list(
    M, error_when_incomplete=True, simplify=False, **flags):
    iblocks = M.strongly_connected_components()
    all_eigs = []
    is_dom = M._rep.domain in (ZZ, QQ)
    for b in iblocks:

        # Fast path for a 1x1 block:
        if is_dom and len(b) == 1:
            index = b[0]
            val = M[index, index]
            all_eigs.append(val)
            continue

        block = M[b, b]

        if isinstance(simplify, FunctionType):
            charpoly = block.charpoly(simplify=simplify)
        else:
            charpoly = block.charpoly()

        eigs = roots(charpoly, multiple=True, **flags)

        if len(eigs) != block.rows:
            degree = int(charpoly.degree())
            f = charpoly.as_expr()
            x = charpoly.gen
            try:
                eigs = [CRootOf(f, x, idx) for idx in range(degree)]
            except NotImplementedError:
                if error_when_incomplete:
                    raise MatrixError(eigenvals_error_message)
                else:
                    eigs = []

        all_eigs += eigs

    if not simplify:
        return all_eigs
    if not isinstance(simplify, FunctionType):
        simplify = _simplify
    return [simplify(value) for value in all_eigs]


def _eigenvals_dict(
    M, error_when_incomplete=True, simplify=False, **flags):
    iblocks = M.strongly_connected_components()
    all_eigs = {}
    is_dom = M._rep.domain in (ZZ, QQ)
    for b in iblocks:

        # Fast path for a 1x1 block:
        if is_dom and len(b) == 1:
            index = b[0]
            val = M[index, index]
            all_eigs[val] = all_eigs.get(val, 0) + 1
            continue

        block = M[b, b]

        if isinstance(simplify, FunctionType):
            charpoly = block.charpoly(simplify=simplify)
        else:
            charpoly = block.charpoly()

        eigs = roots(charpoly, multiple=False, **flags)

        if sum(eigs.values()) != block.rows:
            degree = int(charpoly.degree())
            f = charpoly.as_expr()
            x = charpoly.gen
            try:
                eigs = {CRootOf(f, x, idx): 1 for idx in range(degree)}
            except NotImplementedError:
                if error_when_incomplete:
                    raise MatrixError(eigenvals_error_message)
                else:
                    eigs = {}

        for k, v in eigs.items():
            if k in all_eigs:
                all_eigs[k] += v
            else:
                all_eigs[k] = v

    if not simplify:
        return all_eigs
    if not isinstance(simplify, FunctionType):
        simplify = _simplify
    return {simplify(key): value for key, value in all_eigs.items()}


def _eigenspace(M, eigenval, iszerofunc=_iszero, simplify=False):
    """Get a basis for the eigenspace for a particular eigenvalue"""
    m   = M - M.eye(M.rows) * eigenval
    ret = m.nullspace(iszerofunc=iszerofunc)

    # The nullspace for a real eigenvalue should be non-trivial.
    # If we didn't find an eigenvector, try once more a little harder
    if len(ret) == 0 and simplify:
        ret = m.nullspace(iszerofunc=iszerofunc, simplify=True)
    if len(ret) == 0:
        raise NotImplementedError(
            "Can't evaluate eigenvector for eigenvalue {}".format(eigenval))
    return ret
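
# For example, for M = eye(2) and eigenval = 1, ``m`` above is the zero
# matrix, and ``nullspace`` returns two basis vectors: the eigenvalue 1 has
# a two-dimensional eigenspace.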


def _eigenvects_DOM(M, **kwargs):
    DOM = DomainMatrix.from_Matrix(M, field=True, extension=True)
    DOM = DOM.to_dense()

    if DOM.domain != EX:
        rational, algebraic = dom_eigenvects(DOM)
        eigenvects = dom_eigenvects_to_sympy(
            rational, algebraic, M.__class__, **kwargs)
        eigenvects = sorted(eigenvects, key=lambda x: default_sort_key(x[0]))

        return eigenvects
    return None
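
# ``_eigenvects_DOM`` returns ``None`` when the matrix falls back to the EX
# domain (general symbolic entries); the caller then recomputes the
# eigenvectors with ``_eigenvects_sympy`` below, via ``eigenvals`` and
# ``nullspace``.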


def _eigenvects_sympy(M, iszerofunc, simplify=True, **flags):
    eigenvals = M.eigenvals(rational=False, **flags)

    # Make sure that we have all roots in radical form
    for x in eigenvals:
        if x.has(CRootOf):
            raise MatrixError(
                "Eigenvector computation is not implemented if the matrix has "
                "eigenvalues in CRootOf form")

    eigenvals = sorted(eigenvals.items(), key=default_sort_key)
    ret = []
    for val, mult in eigenvals:
        vects = _eigenspace(M, val, iszerofunc=iszerofunc, simplify=simplify)
        ret.append((val, mult, vects))
    return ret


# This function is a candidate for caching if it gets implemented for matrices.
def _eigenvects(M, error_when_incomplete=True, iszerofunc=_iszero, *, chop=False, **flags):
    """Compute eigenvectors of the matrix.

    Parameters
    ==========

    error_when_incomplete : bool, optional
        Raise an error when not all eigenvalues are computed. This is
        caused by ``roots`` not returning a full list of eigenvalues.

    iszerofunc : function, optional
        Specifies a zero testing function to be used in ``rref``.

        Default value is ``_iszero``, which uses SymPy's naive and fast
        default assumption handler.

        It can also accept any user-specified zero testing function, if it
        is formatted as a function which accepts a single symbolic argument
        and returns ``True`` if it is tested as zero and ``False`` if it
        is tested as non-zero, and ``None`` if it is undecidable.

    simplify : bool or function, optional
        If ``True``, ``as_content_primitive()`` will be used to tidy up
        normalization artifacts.

        It will also be used by the ``nullspace`` routine.

    chop : bool or positive number, optional
        If the matrix contains any Floats, they will be changed to Rationals
        for computation purposes, but the answers will be returned after
        being evaluated with evalf. The ``chop`` flag is passed to ``evalf``.
        When ``chop=True`` a default precision will be used; a number will
        be interpreted as the desired level of precision.

    Returns
    =======

    ret : [(eigenval, multiplicity, eigenspace), ...]
        A ragged list containing tuples of data obtained by ``eigenvals``
        and ``nullspace``.

        ``eigenspace`` is a list containing the ``eigenvector`` for each
        eigenvalue.

        ``eigenvector`` is a vector in the form of a ``Matrix``. e.g.
        a vector of length 3 is returned as ``Matrix([a_1, a_2, a_3])``.

    Raises
    ======

    NotImplementedError
        If the nullspace could not be computed for some eigenvalue.

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> M = Matrix(3, 3, [0, 1, 1, 1, 0, 0, 1, 1, 1])
    >>> M.eigenvects()
    [(-1, 1, [Matrix([
    [-1],
    [ 1],
    [ 0]])]), (0, 1, [Matrix([
    [ 0],
    [-1],
    [ 1]])]), (2, 1, [Matrix([
    [2/3],
    [1/3],
    [  1]])])]
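
    Each basis vector can be checked against the defining relation
    $M v = \lambda v$:

    >>> val, mult, basis = M.eigenvects()[0]
    >>> M * basis[0] == val * basis[0]
    True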

    See Also
    ========

    eigenvals
    MatrixSubspaces.nullspace
    """
    simplify = flags.get('simplify', True)
    primitive = flags.get('simplify', False)
    flags.pop('simplify', None)  # remove this if it's there
    flags.pop('multiple', None)  # remove this if it's there

    if not isinstance(simplify, FunctionType):
        simpfunc = _simplify if simplify else lambda x: x

    has_floats = M.has(Float)
    if has_floats:
        if all(x.is_number for x in M):
            return _eigenvects_mpmath(M)
        M = M.applyfunc(lambda x: nsimplify(x, rational=True))

    ret = _eigenvects_DOM(M)
    if ret is None:
        ret = _eigenvects_sympy(M, iszerofunc, simplify=simplify, **flags)

    if primitive:
        # if the primitive flag is set, get rid of any common
        # integer denominators
        def denom_clean(l):
            from sympy import gcd
            return [(v / gcd(list(v))).applyfunc(simpfunc) for v in l]

        ret = [(val, mult, denom_clean(es)) for val, mult, es in ret]

    if has_floats:
        # if we had floats to start with, turn the eigenvectors to floats
        ret = [(val.evalf(chop=chop), mult, [v.evalf(chop=chop) for v in es])
                for val, mult, es in ret]

    return ret


def _is_diagonalizable_with_eigen(M, reals_only=False):
    """See _is_diagonalizable. This function returns the bool along with the
    eigenvectors to avoid calculating them again in functions like
    ``diagonalize``."""

    if not M.is_square:
        return False, []

    eigenvecs = M.eigenvects(simplify=True)

    for val, mult, basis in eigenvecs:
        if reals_only and not val.is_real: # if we have a complex eigenvalue
            return False, eigenvecs

        if mult != len(basis): # if the geometric multiplicity doesn't equal the algebraic
            return False, eigenvecs

    return True, eigenvecs

def _is_diagonalizable(M, reals_only=False, **kwargs):
    """Returns ``True`` if a matrix is diagonalizable.

    Parameters
    ==========

    reals_only : bool, optional
        If ``True``, it tests whether the matrix can be diagonalized
        to contain only real numbers on the diagonal.

        If ``False``, it tests whether the matrix can be diagonalized
        at all, even with numbers that may not be real.

    Examples
    ========

    Example of a diagonalizable matrix:

    >>> from sympy import Matrix
    >>> M = Matrix([[1, 2, 0], [0, 3, 0], [2, -4, 2]])
    >>> M.is_diagonalizable()
    True

    Example of a non-diagonalizable matrix:

    >>> M = Matrix([[0, 1], [0, 0]])
    >>> M.is_diagonalizable()
    False

    Example of a matrix that is diagonalized in terms of non-real entries:

    >>> M = Matrix([[0, 1], [-1, 0]])
    >>> M.is_diagonalizable(reals_only=False)
    True
    >>> M.is_diagonalizable(reals_only=True)
    False
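
    A diagonalizable matrix can be reconstructed from its eigendecomposition
    (see ``diagonalize``):

    >>> M = Matrix([[1, 2, 0], [0, 3, 0], [2, -4, 2]])
    >>> P, D = M.diagonalize()
    >>> P * D * P.inv() == M
    True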

    See Also
    ========

    is_diagonal
    diagonalize
    """

    if 'clear_cache' in kwargs:
        SymPyDeprecationWarning(
            feature='clear_cache',
            deprecated_since_version=1.4,
            issue=15887
        ).warn()

    if 'clear_subproducts' in kwargs:
        SymPyDeprecationWarning(
            feature='clear_subproducts',
            deprecated_since_version=1.4,
            issue=15887
        ).warn()

    if not M.is_square:
        return False

    if all(e.is_real for e in M) and M.is_symmetric():
        return True

    if all(e.is_complex for e in M) and M.is_hermitian:
        return True

    return _is_diagonalizable_with_eigen(M, reals_only=reals_only)[0]


# G&VL, Matrix Computations, Algo 5.4.2
def _householder_vector(x):
    if not x.cols == 1:
        raise ValueError("Input must be a column matrix")
    v = x.copy()
    v_plus = x.copy()
    v_minus = x.copy()
    q = x[0, 0] / abs(x[0, 0])
    norm_x = x.norm()
    v_plus[0, 0] = x[0, 0] + q * norm_x
    v_minus[0, 0] = x[0, 0] - q * norm_x
    if x[1:, 0].norm() == 0:
        bet = 0
        v[0, 0] = 1
    else:
        if v_plus.norm() <= v_minus.norm():
            v = v_plus
        else:
            v = v_minus
        v = v / v[0]
        bet = 2 / (v.norm() ** 2)
    return v, bet
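
# The pair (v, bet) defines the Householder reflector H = I - bet * v * v.H,
# which maps the input column x onto a multiple of the first unit vector;
# this is the elementary step used by the bidiagonalization routines below.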


def _bidiagonal_decmp_hholder(M):
    m = M.rows
    n = M.cols
    A = M.as_mutable()
    U, V = A.eye(m), A.eye(n)
    for i in range(min(m, n)):
        v, bet = _householder_vector(A[i:, i])
        hh_mat = A.eye(m - i) - bet * v * v.H
        A[i:, i:] = hh_mat * A[i:, i:]
        temp = A.eye(m)
        temp[i:, i:] = hh_mat
        U = U * temp
        if i + 1 <= n - 2:
            v, bet = _householder_vector(A[i, i+1:].T)
            hh_mat = A.eye(n - i - 1) - bet * v * v.H
            A[i:, i+1:] = A[i:, i+1:] * hh_mat
            temp = A.eye(n)
            temp[i+1:, i+1:] = hh_mat
            V = temp * V
    return U, A, V


def _eval_bidiag_hholder(M):
    m = M.rows
    n = M.cols
    A = M.as_mutable()
    for i in range(min(m, n)):
        v, bet = _householder_vector(A[i:, i])
        hh_mat = A.eye(m-i) - bet * v * v.H
        A[i:, i:] = hh_mat * A[i:, i:]
        if i + 1 <= n - 2:
            v, bet = _householder_vector(A[i, i+1:].T)
            hh_mat = A.eye(n - i - 1) - bet * v * v.H
            A[i:, i+1:] = A[i:, i+1:] * hh_mat
    return A


def _bidiagonal_decomposition(M, upper=True):
    """
    Returns (U, B, V.H)

    $A = UBV^{H}$

    where $A$ is the input matrix and $B$ is its bidiagonal form

    Note: bidiagonal computation can hang for symbolic matrices.

    Parameters
    ==========

    upper : bool. Whether to do upper bidiagonalization or lower.
                True for upper and False for lower.

    References
    ==========

    1. Algorithm 5.4.2, Matrix Computations by Golub and Van Loan, 4th edition
    2. Complex Matrix Bidiagonalization: https://github.com/vslobody/Householder-Bidiagonalization

    """

    if type(upper) is not bool:
        raise ValueError("upper must be a boolean")

    if not upper:
        X = _bidiagonal_decmp_hholder(M.H)
        return X[2].H, X[1].H, X[0].H

    return _bidiagonal_decmp_hholder(M)


def _bidiagonalize(M, upper=True):
    """
    Returns $B$, the bidiagonalized form of the input matrix.

    Note: bidiagonal computation can hang for symbolic matrices.

    Parameters
    ==========

    upper : bool. Whether to do upper bidiagonalization or lower.
                True for upper and False for lower.

    References
    ==========

    1. Algorithm 5.4.2, Matrix Computations by Golub and Van Loan, 4th edition
    2. Complex Matrix Bidiagonalization: https://github.com/vslobody/Householder-Bidiagonalization

    """

    if type(upper) is not bool:
        raise ValueError("upper must be a boolean")

    if not upper:
        return _eval_bidiag_hholder(M.H).H

    return _eval_bidiag_hholder(M)


def _diagonalize(M, reals_only=False, sort=False, normalize=False):
    """
    Return (P, D), where D is diagonal and

        D = P^-1 * M * P

    where M is the current matrix.

    Parameters
    ==========

    reals_only : bool. Whether to throw an error if complex numbers are
                    needed to diagonalize. (Default: False)

    sort : bool. Sort the eigenvalues along the diagonal. (Default: False)

    normalize : bool. If True, normalize the columns of P. (Default: False)

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
    >>> M
    Matrix([
    [1,  2, 0],
    [0,  3, 0],
    [2, -4, 2]])
    >>> (P, D) = M.diagonalize()
    >>> D
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])
    >>> P
    Matrix([
    [-1, 0, -1],
    [ 0, 0, -1],
    [ 2, 1,  2]])
    >>> P.inv() * M * P
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])

    See Also
    ========

    is_diagonal
    is_diagonalizable
    """

    if not M.is_square:
        raise NonSquareMatrixError()

    is_diagonalizable, eigenvecs = _is_diagonalizable_with_eigen(M,
                reals_only=reals_only)

    if not is_diagonalizable:
        raise MatrixError("Matrix is not diagonalizable")

    if sort:
        eigenvecs = sorted(eigenvecs, key=default_sort_key)

    p_cols, diag = [], []

    for val, mult, basis in eigenvecs:
        diag   += [val] * mult
        p_cols += basis

    if normalize:
        p_cols = [v / v.norm() for v in p_cols]

    return M.hstack(*p_cols), M.diag(*diag)


def _fuzzy_positive_definite(M):
    positive_diagonals = M._has_positive_diagonals()
    if positive_diagonals is False:
        return False

    if positive_diagonals and M.is_strongly_diagonally_dominant:
        return True

    return None


def _fuzzy_positive_semidefinite(M):
    nonnegative_diagonals = M._has_nonnegative_diagonals()
    if nonnegative_diagonals is False:
        return False

    if nonnegative_diagonals and M.is_weakly_diagonally_dominant:
        return True

    return None
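
# Both fuzzy tests above use a Gershgorin-type sufficient condition: a
# hermitian matrix with positive diagonal entries that is strictly
# diagonally dominant is positive definite, and weak dominance with a
# nonnegative diagonal gives positive semidefiniteness.  ``None`` means the
# quick test was inconclusive and a slower decision procedure is used.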


def _is_positive_definite(M):
    if not M.is_hermitian:
        if not M.is_square:
            return False
        M = M + M.H

    fuzzy = _fuzzy_positive_definite(M)
    if fuzzy is not None:
        return fuzzy

    return _is_positive_definite_GE(M)


def _is_positive_semidefinite(M):
    if not M.is_hermitian:
        if not M.is_square:
            return False
        M = M + M.H

    fuzzy = _fuzzy_positive_semidefinite(M)
    if fuzzy is not None:
        return fuzzy

    return _is_positive_semidefinite_cholesky(M)


def _is_negative_definite(M):
    return _is_positive_definite(-M)


def _is_negative_semidefinite(M):
    return _is_positive_semidefinite(-M)


def _is_indefinite(M):
    if M.is_hermitian:
        eigen = M.eigenvals()
        args1        = [x.is_positive for x in eigen.keys()]
        any_positive = fuzzy_or(args1)
        args2        = [x.is_negative for x in eigen.keys()]
        any_negative = fuzzy_or(args2)

        return fuzzy_and([any_positive, any_negative])

    elif M.is_square:
        return (M + M.H).is_indefinite

    return False


def _is_positive_definite_GE(M):
    """A division-free gaussian elimination method for testing
    positive-definiteness."""
    M = M.as_mutable()
    size = M.rows

    for i in range(size):
        is_positive = M[i, i].is_positive
        if is_positive is not True:
            return is_positive
        for j in range(i+1, size):
            M[j, i+1:] = M[i, i] * M[j, i+1:] - M[j, i] * M[i, i+1:]
    return True
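
# Note that the elimination above is division-free, so when every earlier
# pivot is positive the sign of the next pivot agrees with the sign of the
# corresponding leading principal minor; requiring all pivots to be positive
# is therefore, in effect, Sylvester's criterion for positive definiteness.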


def _is_positive_semidefinite_cholesky(M):
    """Uses Cholesky factorization with complete pivoting

    References
    ==========

    .. [1] http://eprints.ma.man.ac.uk/1199/1/covered/MIMS_ep2008_116.pdf

    .. [2] https://www.value-at-risk.net/cholesky-factorization/
    """
    M = M.as_mutable()
    for k in range(M.rows):
        diags = [M[i, i] for i in range(k, M.rows)]
        pivot, pivot_val, nonzero, _ = _find_reasonable_pivot(diags)

        if nonzero:
            return None

        if pivot is None:
            for i in range(k+1, M.rows):
                for j in range(k, M.cols):
                    iszero = M[i, j].is_zero
                    if iszero is None:
                        return None
                    elif iszero is False:
                        return False
            return True

        if M[k, k].is_negative or pivot_val.is_negative:
            return False
        elif not (M[k, k].is_nonnegative and pivot_val.is_nonnegative):
            return None

        if pivot > 0:
            M.col_swap(k, k+pivot)
            M.row_swap(k, k+pivot)

        M[k, k] = sqrt(M[k, k])
        M[k, k+1:] /= M[k, k]
        M[k+1:, k+1:] -= M[k, k+1:].H * M[k, k+1:]

    return M[-1, -1].is_nonnegative


_doc_positive_definite = \
    r"""Finds out the definiteness of a matrix.

    Explanation
    ===========

    A square real matrix $A$ is:

    - A positive definite matrix if $x^T A x > 0$
      for all non-zero real vectors $x$.
    - A positive semidefinite matrix if $x^T A x \geq 0$
      for all non-zero real vectors $x$.
    - A negative definite matrix if $x^T A x < 0$
      for all non-zero real vectors $x$.
    - A negative semidefinite matrix if $x^T A x \leq 0$
      for all non-zero real vectors $x$.
    - An indefinite matrix if there exists non-zero real vectors
      $x, y$ with $x^T A x > 0 > y^T A y$.

    A square complex matrix $A$ is:

    - A positive definite matrix if $\text{re}(x^H A x) > 0$
      for all non-zero complex vectors $x$.
    - A positive semidefinite matrix if $\text{re}(x^H A x) \geq 0$
      for all non-zero complex vectors $x$.
    - A negative definite matrix if $\text{re}(x^H A x) < 0$
      for all non-zero complex vectors $x$.
    - A negative semidefinite matrix if $\text{re}(x^H A x) \leq 0$
      for all non-zero complex vectors $x$.
    - An indefinite matrix if there exists non-zero complex vectors
      $x, y$ with $\text{re}(x^H A x) > 0 > \text{re}(y^H A y)$.

    A matrix need not be symmetric or hermitian to be positive definite.

    - A real non-symmetric matrix is positive definite if and only if
      $\frac{A + A^T}{2}$ is positive definite.
    - A complex non-hermitian matrix is positive definite if and only if
      $\frac{A + A^H}{2}$ is positive definite.

    And this extension can apply for all the definitions above.

    However, for complex cases, you can restrict the definition of
    $\text{re}(x^H A x) > 0$ to $x^H A x > 0$ and require the matrix
    to be hermitian.
    But we do not present this restriction for computation because you
    can check ``M.is_hermitian`` independently with this and use
    the same procedure.

    Examples
    ========

    An example of symmetric positive definite matrix:

    .. plot::
        :context: reset
        :format: doctest
        :include-source: True

        >>> from sympy import Matrix, symbols
        >>> from sympy.plotting import plot3d
        >>> a, b = symbols('a b')
        >>> x = Matrix([a, b])

        >>> A = Matrix([[1, 0], [0, 1]])
        >>> A.is_positive_definite
        True
        >>> A.is_positive_semidefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of symmetric positive semidefinite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[1, -1], [-1, 1]])
        >>> A.is_positive_definite
        False
        >>> A.is_positive_semidefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of symmetric negative definite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[-1, 0], [0, -1]])
        >>> A.is_negative_definite
        True
        >>> A.is_negative_semidefinite
        True
        >>> A.is_indefinite
        False

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of symmetric indefinite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[1, 2], [2, -1]])
        >>> A.is_indefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    An example of non-symmetric positive definite matrix:

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> A = Matrix([[1, 2], [-2, 1]])
        >>> A.is_positive_definite
        True
        >>> A.is_positive_semidefinite
        True

        >>> p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))

    Notes
    =====

    Although the definition of a positive definite matrix is sometimes
    stated only for symmetric or hermitian matrices, that restriction is
    not correct because it does not capture all instances of positive
    definite matrices under the definition $x^T A x > 0$ or
    $\text{re}(x^H A x) > 0$.

    For instance, ``Matrix([[1, 2], [-2, 1]])`` presented in
    the example above is an example of a real positive definite matrix
    that is not symmetric.

    However, since the following formula holds:

    .. math::
        \text{re}(x^H A x) > 0 \iff
        \text{re}(x^H \frac{A + A^H}{2} x) > 0

    we can classify all positive definite matrices that may or may not
    be symmetric or hermitian by transforming the matrix to
    $\frac{A + A^T}{2}$ or $\frac{A + A^H}{2}$
    (which is guaranteed to be always real symmetric or complex
    hermitian), and we can defer most of the study to symmetric or
    hermitian positive definite matrices.

    The existence of a Cholesky decomposition is a different matter:
    even though a non-symmetric or non-hermitian matrix can be positive
    definite, the Cholesky or LDL decomposition does not exist for it,
    because those decompositions require the matrix to be symmetric or
    hermitian.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Definiteness_of_a_matrix#Eigenvalues

    .. [2] http://mathworld.wolfram.com/PositiveDefiniteMatrix.html

    .. [3] Johnson, C. R. "Positive Definite Matrices." Amer.
        Math. Monthly 77, 259-264 1970.
    """

_is_positive_definite.__doc__     = _doc_positive_definite
_is_positive_semidefinite.__doc__ = _doc_positive_definite
_is_negative_definite.__doc__     = _doc_positive_definite
_is_negative_semidefinite.__doc__ = _doc_positive_definite
_is_indefinite.__doc__            = _doc_positive_definite


def _jordan_form(M, calc_transform=True, *, chop=False):
    """Return $(P, J)$ where $J$ is a Jordan block
    matrix and $P$ is a matrix such that $M = P J P^{-1}$

    Parameters
    ==========

    calc_transform : bool
        If ``False``, then only $J$ is returned.

    chop : bool
        All matrices are converted to exact types when computing
        eigenvalues and eigenvectors.  As a result, there may be
        approximation errors.  If ``chop==True``, these errors
        will be truncated.

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> M = Matrix([[ 6,  5, -2, -3], [-3, -1,  3,  3], [ 2,  1, -2, -3], [-1,  1,  5,  5]])
    >>> P, J = M.jordan_form()
    >>> J
    Matrix([
    [2, 1, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 2, 1],
    [0, 0, 0, 2]])
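
    The transformation matrix $P$ satisfies $M = P J P^{-1}$:

    >>> P * J * P.inv() == M
    True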

    See Also
    ========

    jordan_block
    """

    if not M.is_square:
        raise NonSquareMatrixError("Only square matrices have Jordan forms")

    mat        = M
    has_floats = M.has(Float)

    if has_floats:
        try:
            max_prec = max(term._prec for term in M.values() if isinstance(term, Float))
        except ValueError:
            # if no term in the matrix is explicitly a Float, calling max()
            # will throw an error, so set max_prec to the default value of 53
            max_prec = 53

        # set a minimum max_dps of 15 to prevent loss of precision in a
        # matrix containing unevaluated expressions
        max_dps = max(prec_to_dps(max_prec), 15)

    def restore_floats(*args):
        """If ``has_floats`` is `True`, cast all ``args`` as
        matrices of floats."""

        if has_floats:
            args = [m.evalf(n=max_dps, chop=chop) for m in args]
        if len(args) == 1:
            return args[0]

        return args

    # cache calculations for some speedup
    mat_cache = {}

    def eig_mat(val, pow):
        """Cache computations of ``(M - val*I)**pow`` for quick
        retrieval"""

        if (val, pow) in mat_cache:
            return mat_cache[(val, pow)]

        if (val, pow - 1) in mat_cache:
            mat_cache[(val, pow)] = mat_cache[(val, pow - 1)].multiply(
                    mat_cache[(val, 1)], dotprodsimp=None)
        else:
            mat_cache[(val, pow)] = (mat - val*M.eye(M.rows)).pow(pow)

        return mat_cache[(val, pow)]

    # helper functions
    def nullity_chain(val, algebraic_multiplicity):
        """Calculate the sequence  [0, nullity(E), nullity(E**2), ...]
        until it is constant where ``E = M - val*I``"""

        # mat.rank() is faster than computing the null space,
        # so use the rank-nullity theorem
        cols    = M.cols
        ret     = [0]
        nullity = cols - eig_mat(val, 1).rank()
        i       = 2

        while nullity != ret[-1]:
            ret.append(nullity)

            if nullity == algebraic_multiplicity:
                break

            nullity  = cols - eig_mat(val, i).rank()
            i       += 1

            # Due to issues like #7146 and #15872, SymPy sometimes
            # gives the wrong rank. In this case, raise an error
            # instead of returning an incorrect matrix
            if nullity < ret[-1] or nullity > algebraic_multiplicity:
                raise MatrixError(
                    "SymPy encountered an inconsistent "
                    "result while computing Jordan block: "
                    "{}".format(M))

        return ret

    def blocks_from_nullity_chain(d):
        """Return a list of the size of each Jordan block.
        If d_n is the nullity of E**n, then the number
        of Jordan blocks of size n is

            2*d_n - d_(n-1) - d_(n+1)"""

        # d[0] is always the number of columns, so skip past it
        mid = [2*d[n] - d[n - 1] - d[n + 1] for n in range(1, len(d) - 1)]
        # d is assumed to plateau with "d[ len(d) ] == d[-1]", so
        # 2*d_n - d_(n-1) - d_(n+1) == d_n - d_(n-1)
        end = [d[-1] - d[-2]] if len(d) > 1 else [d[0]]

        return mid + end

    def pick_vec(small_basis, big_basis):
        """Picks a vector from big_basis that isn't in
        the subspace spanned by small_basis"""

        if len(small_basis) == 0:
            return big_basis[0]

        for v in big_basis:
            _, pivots = M.hstack(*(small_basis + [v])).echelon_form(
                    with_pivots=True)

            if pivots[-1] == len(small_basis):
                return v

    # roots doesn't like Floats, so replace them with Rationals
    if has_floats:
        mat = mat.applyfunc(lambda x: nsimplify(x, rational=True))

    # first calculate the jordan block structure
    eigs = mat.eigenvals()

    # Make sure that we have all roots in radical form
    for x in eigs:
        if x.has(CRootOf):
            raise MatrixError(
                "Jordan normal form is not implemented if the matrix has "
                "eigenvalues in CRootOf form")

    # most matrices have distinct eigenvalues
    # and so are diagonalizable.  In this case, don't
    # do extra work!
    if len(eigs.keys()) == mat.cols:
        blocks     = list(sorted(eigs.keys(), key=default_sort_key))
        jordan_mat = mat.diag(*blocks)

        if not calc_transform:
            return restore_floats(jordan_mat)

        jordan_basis = [eig_mat(eig, 1).nullspace()[0]
                for eig in blocks]
        basis_mat    = mat.hstack(*jordan_basis)

        return restore_floats(basis_mat, jordan_mat)

    block_structure = []

    for eig in sorted(eigs.keys(), key=default_sort_key):
        algebraic_multiplicity = eigs[eig]
        chain = nullity_chain(eig, algebraic_multiplicity)
        block_sizes = blocks_from_nullity_chain(chain)

        # if block_sizes == [a, b, c, ...], then the number of
        # Jordan blocks of size 1 is a, of size 2 is b, etc.
        # create an array that has (eig, block_size) with one
        # entry for each block
        size_nums = [(i+1, num) for i, num in enumerate(block_sizes)]

        # we expect larger Jordan blocks to come earlier
        size_nums.reverse()

        block_structure.extend(
            (eig, size) for size, num in size_nums for _ in range(num))

    jordan_form_size = sum(size for eig, size in block_structure)

    if jordan_form_size != M.rows:
        raise MatrixError(
            "SymPy encountered an inconsistent result while "
            "computing the Jordan block of: {}".format(M))

    blocks     = (mat.jordan_block(size=size, eigenvalue=eig) for eig, size in block_structure)
    jordan_mat = mat.diag(*blocks)

    if not calc_transform:
        return restore_floats(jordan_mat)

    # For each generalized eigenspace, calculate a basis.
    # We start by looking for a vector in null( (A - eig*I)**n )
    # which isn't in null( (A - eig*I)**(n-1) ) where n is
    # the size of the Jordan block
    #
    # Ideally we'd just loop through block_structure and
    # compute each generalized eigenspace.  However, this
    # causes a lot of unneeded computation.  Instead, we
    # go through the eigenvalues separately, since we know
    # their generalized eigenspaces must have bases that
    # are linearly independent.
    jordan_basis = []

    for eig in sorted(eigs.keys(), key=default_sort_key):
        eig_basis = []

        for block_eig, size in block_structure:
            if block_eig != eig:
                continue

            null_big   = (eig_mat(eig, size)).nullspace()
            null_small = (eig_mat(eig, size - 1)).nullspace()

            # we want to pick something that is in the big basis
            # and not the small, but also something that is independent
            # of any other generalized eigenvectors from a different
            # generalized eigenspace sharing the same eigenvalue.
            vec      = pick_vec(null_small + eig_basis, null_big)
            new_vecs = [eig_mat(eig, i).multiply(vec, dotprodsimp=None)
                    for i in range(size)]

            eig_basis.extend(new_vecs)
            jordan_basis.extend(reversed(new_vecs))

    basis_mat = mat.hstack(*jordan_basis)

    return restore_floats(basis_mat, jordan_mat)


def _left_eigenvects(M, **flags):
    """Returns left eigenvectors and eigenvalues.

    This function returns the list of triples (eigenval, multiplicity,
    basis) for the left eigenvectors. Options are the same as for
    eigenvects(), i.e. the ``**flags`` arguments get passed directly to
    eigenvects().

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> M = Matrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]])
    >>> M.eigenvects()
    [(-1, 1, [Matrix([
    [-1],
    [ 1],
    [ 0]])]), (0, 1, [Matrix([
    [ 0],
    [-1],
    [ 1]])]), (2, 1, [Matrix([
    [2/3],
    [1/3],
    [  1]])])]
    >>> M.left_eigenvects()
    [(-1, 1, [Matrix([[-2, 1, 1]])]), (0, 1, [Matrix([[-1, -1, 1]])]), (2,
    1, [Matrix([[1, 1, 1]])])]
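
    A left eigenvector is a row vector $v$ with $v M = \lambda v$:

    >>> val, mult, basis = M.left_eigenvects()[0]
    >>> basis[0] * M == val * basis[0]
    True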

    """

    eigs = M.transpose().eigenvects(**flags)

    return [(val, mult, [l.transpose() for l in basis]) for val, mult, basis in eigs]


def _singular_values(M):
    """Compute the singular values of a Matrix

    Examples
    ========

    >>> from sympy import Matrix, Symbol
    >>> x = Symbol('x', real=True)
    >>> M = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
    >>> M.singular_values()
    [sqrt(x**2 + 1), 1, 0]

    See Also
    ========

    condition_number
    """

    if M.rows >= M.cols:
        valmultpairs = M.H.multiply(M).eigenvals()
    else:
        valmultpairs = M.multiply(M.H).eigenvals()

    # Expands result from eigenvals into a simple list
    vals = []

    for k, v in valmultpairs.items():
        vals += [sqrt(k)] * v  # dangerous! same k in several spots!

    # Pad with zeros if singular values are computed in reverse way,
    # to give consistent format.
    if len(vals) < M.cols:
        vals += [M.zero] * (M.cols - len(vals))

    # sort them in descending order
    vals.sort(reverse=True, key=default_sort_key)

    return vals