1import collections
2import functools
3import math
4import typing
5from types import FunctionType
6
7from ..core import (Add, Atom, Basic, Dummy, Expr, Float, I, Integer, Pow,
8                    Symbol, count_ops, oo, symbols)
9from ..core.compatibility import as_int, is_sequence
10from ..core.logic import fuzzy_and
11from ..core.sympify import sympify
12from ..functions import Max, Min, exp, factorial, sqrt
13from ..polys import PurePoly, cancel, gcd, roots
14from ..printing.defaults import DefaultPrinting
15from ..simplify import nsimplify, signsimp
16from ..simplify import simplify as _simplify
17from ..utilities import default_sort_key, flatten
18
19
20def _iszero(x):
21    """Returns True if x is zero."""
22    r = x.equals(0)
23    if r is None:
24        raise NotImplementedError(f'Zero-decision problem for {x}')
25    return r
26
27
28class MatrixError(Exception):
29    """Generic matrix error."""
30
31
32class ShapeError(ValueError, MatrixError):
33    """Wrong matrix shape."""
34
35
36class NonSquareMatrixError(ShapeError):
37    """Raised when a square matrix is expected."""
38
39
40class MatrixBase(DefaultPrinting):
41    """Base class for matrices."""
42
43    # Added just for numpy compatibility
44    __array_priority__ = 11
45
46    is_Matrix = True
47    is_Identity: typing.Optional[bool] = None
48    _class_priority = 3
49    _sympify = staticmethod(sympify)
50
51    __hash__ = None  # type: ignore[assignment]
52
53    @classmethod
54    def _handle_creation_inputs(cls, *args, **kwargs):
55        """Return the number of rows, cols and flat matrix elements.
56
57        Examples
58        ========
59
60        Matrix can be constructed as follows:
61
62        * from a nested list of iterables
63
64        >>> Matrix(((1, 2 + I), (3, 4)))
65        Matrix([
66        [1, 2 + I],
67        [3,     4]])
68
69        * from un-nested iterable (interpreted as a column)
70
71        >>> Matrix([1, 2])
72        Matrix([
73        [1],
74        [2]])
75
76        * from un-nested iterable with dimensions
77
78        >>> Matrix(1, 2, [1, 2])
79        Matrix([[1, 2]])
80
81        * from no arguments (a 0 x 0 matrix)
82
83        >>> Matrix()
84        Matrix(0, 0, [])
85
86        * from a rule
87
88        >>> Matrix(2, 2, lambda i, j: i/(j + 1))
89        Matrix([
90        [0,   0],
91        [1, 1/2]])
92
93        """
94        from .sparse import SparseMatrixBase
95
96        flat_list = None
97
98        if len(args) == 1:
99            # Matrix(SparseMatrix(...))
100            if isinstance(args[0], SparseMatrixBase):
101                return args[0].rows, args[0].cols, flatten(args[0].tolist())
102
103            # Matrix(Matrix(...))
104            elif isinstance(args[0], MatrixBase):
105                return args[0].rows, args[0].cols, args[0]._mat
106
107            # Matrix(MatrixSymbol('X', 2, 2))
108            elif isinstance(args[0], Basic) and args[0].is_Matrix:
109                return args[0].rows, args[0].cols, args[0].as_explicit()._mat
110
111            # Matrix(numpy.ones((2, 2)))
112            elif hasattr(args[0], '__array__'):
113                # NumPy array or matrix or some other object that implements
114                # __array__. So let's first use this method to get a
115                # numpy.array() and then make a python list out of it.
116                arr = args[0].__array__()
117                if len(arr.shape) == 2:
118                    rows, cols = arr.shape[0], arr.shape[1]
119                    flat_list = [cls._sympify(i) for i in arr.ravel()]
120                    return rows, cols, flat_list
121                elif len(arr.shape) == 1:
122                    rows, cols = arr.shape[0], 1
123                    flat_list = [Integer(0)]*rows
124                    for i, a in enumerate(arr):
125                        flat_list[i] = cls._sympify(a)
126                    return rows, cols, flat_list
127                else:
128                    raise NotImplementedError(
129                        'Diofant supports just 1D and 2D matrices')
130
131            # Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
132            elif is_sequence(args[0]):
133                in_mat = []
134                ncol = set()
135                for row in args[0]:
136                    if isinstance(row, MatrixBase):
137                        in_mat.extend(row.tolist())
138                        assert row.cols or row.rows
139                        ncol.add(row.cols)
140                    else:
141                        in_mat.append(row)
142                        try:
143                            ncol.add(len(row))
144                        except TypeError:
145                            ncol.add(1)
146                if len(ncol) > 1:
147                    raise ValueError(f'Got rows of variable lengths: {sorted(ncol)}')
148                cols = ncol.pop() if ncol else 0
149                rows = len(in_mat) if cols else 0
150                if rows:
151                    if not is_sequence(in_mat[0]):
152                        cols = 1
153                        flat_list = [cls._sympify(i) for i in in_mat]
154                        return rows, cols, flat_list
155                flat_list = []
156                for j in range(rows):
157                    for i in range(cols):
158                        flat_list.append(cls._sympify(in_mat[j][i]))
159
160        elif len(args) == 3:
161            rows = as_int(args[0])
162            cols = as_int(args[1])
163
164            # Matrix(2, 2, lambda i, j: i+j)
165            if len(args) == 3 and isinstance(args[2], collections.abc.Callable):
166                op = args[2]
167                flat_list = []
168                for i in range(rows):
169                    flat_list.extend(
170                        [cls._sympify(op(cls._sympify(i), cls._sympify(j)))
171                         for j in range(cols)])
172
173            # Matrix(2, 2, [1, 2, 3, 4])
174            elif len(args) == 3 and is_sequence(args[2]):
175                flat_list = args[2]
176                if len(flat_list) != rows*cols:
177                    raise ValueError('List length should be equal to rows*columns')
178                flat_list = [cls._sympify(i) for i in flat_list]
179
180        # Matrix()
181        elif len(args) == 0:
182            # Empty Matrix
183            rows = cols = 0
184            flat_list = []
185
186        if flat_list is None:
187            raise TypeError('Data type not understood')
188
189        return rows, cols, flat_list
190
191    def _setitem(self, key, value):
192        """Helper to set value at location given by key.
193
194        Examples
195        ========
196
197        >>> m = Matrix(((1, 2+I), (3, 4)))
198        >>> m
199        Matrix([
200        [1, 2 + I],
201        [3,     4]])
202        >>> m[1, 0] = 9
203        >>> m
204        Matrix([
205        [1, 2 + I],
206        [9,     4]])
207        >>> m[1, 0] = [[0, 1]]
208
209        To replace row r you assign to position r*m where m
210        is the number of columns:
211
212        >>> M = zeros(4)
213        >>> m = M.cols
214        >>> M[3*m] = ones(1, m)*2
215        >>> M
216        Matrix([
217        [0, 0, 0, 0],
218        [0, 0, 0, 0],
219        [0, 0, 0, 0],
220        [2, 2, 2, 2]])
221
222        And to replace column c you can assign to position c:
223
224        >>> M[2] = ones(m, 1)*4
225        >>> M
226        Matrix([
227        [0, 0, 4, 0],
228        [0, 0, 4, 0],
229        [0, 0, 4, 0],
230        [2, 2, 4, 2]])
231
232        """
233        from . import Matrix
234
235        is_slice = isinstance(key, slice)
236        i, j = key = self.key2ij(key)
237        is_mat = isinstance(value, MatrixBase)
238        if type(i) is slice or type(j) is slice:
239            if is_mat:
240                self.copyin_matrix(key, value)
241                return
242            if not isinstance(value, Expr) and is_sequence(value):
243                self.copyin_list(key, value)
244                return
245            raise ValueError(f'unexpected value: {value}')
246        else:
247            if (not is_mat and
248                    not isinstance(value, Basic) and is_sequence(value)):
249                value = Matrix(value)
250                is_mat = True
251            if is_mat:
252                assert not is_slice
253                key = (slice(i, i + value.rows), slice(j, j + value.cols))
254                self.copyin_matrix(key, value)
255            else:
256                return i, j, self._sympify(value)
257
258    def copy(self):
259        """Returns the copy of a matrix."""
260        return self._new(self.rows, self.cols, self._mat)
261
262    def trace(self):
263        """Returns the trace of a matrix."""
264        if not self.is_square:
265            raise NonSquareMatrixError()
266        return self._eval_trace()
267
268    def inv(self, method=None, **kwargs):
269        r"""Returns the inverse of the matrix.
270
271        Parameters
272        ==========
273
274        method : {'GE', 'LU', 'ADJ', 'CH', 'LDL'} or None
275            Selects algorithm for inversion.  For dense matrices
276            available {'GE', 'LU', 'ADJ'}, default is 'GE'.  For
277            sparse: {'CH', 'LDL'}, default is 'LDL'.
278
279        Raises
280        ======
281
282        ValueError
283            If the determinant of the matrix is zero.
284
285        See Also
286        ========
287
288        inverse_LU
289        inverse_GE
290        inverse_ADJ
291
292        """
293        if not self.is_square:
294            raise NonSquareMatrixError()
295        if method is not None:
296            kwargs['method'] = method
297        return self._eval_inverse(**kwargs)
298
299    def inv_mod(self, m):
300        r"""
301        Returns the inverse of the matrix `K` (mod `m`), if it exists.
302
303        Method to find the matrix inverse of `K` (mod `m`) implemented in this function:
304
305        * Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.
306
307        * Compute `r = 1/\mathrm{det}(K) \pmod m`.
308
309        * `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.
310
311        Examples
312        ========
313
314        >>> A = Matrix(2, 2, [1, 2, 3, 4])
315        >>> A.inv_mod(5)
316        Matrix([
317        [3, 1],
318        [4, 2]])
319        >>> A.inv_mod(3)
320        Matrix([
321        [1, 1],
322        [0, 1]])
323
324        """
325        from ..ntheory import totient
326        if not self.is_square:
327            raise NonSquareMatrixError()
328        N = self.cols
329        phi = totient(m)
330        det_K = self.det()
331        if gcd(det_K, m) != 1:
332            raise ValueError(f'Matrix is not invertible (mod {m:d})')
333        det_inv = pow(int(det_K), int(phi - 1), int(m))
334        K_adj = self.cofactorMatrix().transpose()
335        K_inv = self.__class__(N, N, [det_inv*K_adj[i, j] % m for i in range(N) for j in range(N)])
336        return K_inv
337
338    def transpose(self):
339        """Matrix transposition."""
340        return self._eval_transpose()
341
342    T = property(transpose, None, None, 'Matrix transposition.')
343
344    def conjugate(self):
345        """By-element conjugation."""
346        return self._eval_conjugate()
347
348    C = property(conjugate, None, None, 'By-element conjugation.')
349
350    def adjoint(self):
351        """Conjugate transpose or Hermitian conjugation."""
352        return self.T.C
353
354    @property
355    def H(self):
356        """Return Hermite conjugate.
357
358        Examples
359        ========
360
361        >>> m = Matrix((0, 1 + I, 2, 3))
362        >>> m
363        Matrix([
364        [    0],
365        [1 + I],
366        [    2],
367        [    3]])
368        >>> m.H
369        Matrix([[0, 1 - I, 2, 3]])
370
371        See Also
372        ========
373
374        conjugate: By-element conjugation
375        D: Dirac conjugation
376
377        """
378        return self.T.C
379
380    @property
381    def D(self):
382        """Return Dirac conjugate (if self.rows == 4).
383
384        Examples
385        ========
386
387        >>> m = Matrix((0, 1 + I, 2, 3))
388        >>> m.D
389        Matrix([[0, 1 - I, -2, -3]])
390        >>> m = (eye(4) + I*eye(4))
391        >>> m[0, 3] = 2
392        >>> m.D
393        Matrix([
394        [1 - I,     0,      0,      0],
395        [    0, 1 - I,      0,      0],
396        [    0,     0, -1 + I,      0],
397        [    2,     0,      0, -1 + I]])
398
399        If the matrix does not have 4 rows an AttributeError will be raised
400        because this property is only defined for matrices with 4 rows.
401
402        >>> Matrix(eye(2)).D
403        Traceback (most recent call last):
404        ...
405        AttributeError: Matrix has no attribute D.
406
407        See Also
408        ========
409
410        conjugate: By-element conjugation
411        H: Hermite conjugation
412
413        """
414        if self.rows != 4:
415            raise AttributeError
416        return self.H*mgamma(0)
417
418    def __array__(self):
419        from .dense import matrix2numpy
420        return matrix2numpy(self)
421
422    def __len__(self):
423        """Return the number of elements of self.
424
425        Implemented mainly so bool(Matrix()) == False.
426
427        """
428        return int(self.rows*self.cols)
429
430    @property
431    def shape(self):
432        """The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
433
434        Examples
435        ========
436
437        >>> M = zeros(2, 3)
438        >>> M.shape
439        (2, 3)
440        >>> M.rows
441        2
442        >>> M.cols
443        3
444
445        """
446        return self.rows, self.cols
447
448    def __sub__(self, a):
449        return self + (-a)
450
451    def __mul__(self, other):
452        """Return self*other where other is either a scalar or a matrix
453        of compatible dimensions.
454
455        Examples
456        ========
457
458        >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
459        >>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
460        True
461        >>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
462        >>> A*B
463        Matrix([
464        [30, 36, 42],
465        [66, 81, 96]])
466        >>> B*A
467        Traceback (most recent call last):
468        ...
469        ShapeError: Matrices size mismatch.
470        >>>
471
472        See Also
473        ========
474
475        matrix_multiply_elementwise
476
477        """
478        if getattr(other, 'is_Matrix', False):
479            A = self
480            B = other
481            if A.cols != B.rows:
482                raise ShapeError('Matrices size mismatch.')
483            if A.cols == 0:
484                return classof(A, B)._new(A.rows, B.cols, lambda i, j: 0)
485            try:
486                blst = B.T.tolist()
487            except AttributeError:
488                return NotImplemented
489            alst = A.tolist()
490            return classof(A, B)._new(A.rows, B.cols, lambda i, j:
491                                      functools.reduce(lambda k, l: k + l,
492                                                       [a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
493        else:
494            return self._new(self.rows, self.cols,
495                             [i*other for i in self._mat])
496
497    def __rmul__(self, a):
498        if getattr(a, 'is_Matrix', False):
499            return self._new(a)*self
500        return self._new(self.rows, self.cols, [a*i for i in self._mat])
501
502    def __pow__(self, num):
503        from ..functions import binomial
504        from . import MutableMatrix, diag, eye
505
506        if not self.is_square:
507            raise NonSquareMatrixError()
508        if isinstance(num, int) or isinstance(num, Integer):
509            n = int(num)
510            if n < 0:
511                return self.inv()**-n   # A**-2 = (A**-1)**2
512            a = eye(self.cols)
513            s = self
514            while n:
515                if n % 2:
516                    a *= s
517                    n -= 1
518                if not n:
519                    break
520                s *= s
521                n //= 2
522            return self._new(a)
523        elif isinstance(num, Expr):
524
525            def jordan_cell_power(jc, n):
526                N = jc.shape[0]
527                l = jc[0, 0]
528                for i in range(N):
529                    for j in range(N - i):
530                        bn = binomial(n, i)
531                        if isinstance(bn, binomial):
532                            bn = bn._eval_expand_func()
533                        jc[j, i + j] = l**(n - i)*bn
534
535            jordan_cells, P = self.jordan_cells()
536            # Make sure jordan_cells matrices are mutable:
537            jordan_cells = [MutableMatrix(j) for j in jordan_cells]
538            for j in jordan_cells:
539                jordan_cell_power(j, num)
540            return self._new(P*diag(*jordan_cells)*P.inv())
541        else:
542            raise TypeError(
543                'Only Diofant expressions or int objects are supported as exponent for matrices')
544
545    def __add__(self, other):
546        """Return self + other, raising ShapeError if shapes don't match."""
547        if getattr(other, 'is_Matrix', False):
548            A = self
549            B = other
550            if A.shape != B.shape:
551                raise ShapeError('Matrix size mismatch.')
552            alst = A.tolist()
553            blst = B.tolist()
554            ret = [Integer(0)]*A.rows
555            for i in range(A.shape[0]):
556                ret[i] = [j + k for j, k in zip(alst[i], blst[i])]
557            rv = classof(A, B)._new(ret)
558            if 0 in A.shape:
559                rv = rv.reshape(*A.shape)
560            return rv
561        raise TypeError(f'cannot add matrix and {type(other)}')
562
563    def __radd__(self, other):
564        return self + other
565
566    def __truediv__(self, other):
567        return self*(Integer(1)/other)
568
569    def __neg__(self):
570        return -1*self
571
572    def multiply(self, b):
573        """Returns self*b
574
575        See Also
576        ========
577
578        dot
579        cross
580        multiply_elementwise
581
582        """
583        return self*b
584
585    def add(self, b):
586        """Return self + b."""
587        return self + b
588
589    def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
590              colsep=', ', align='right'):
591        r"""
592        String form of Matrix as a table.
593
594        ``printer`` is the printer to use for on the elements (generally
595        something like StrPrinter())
596
597        ``rowstart`` is the string used to start each row (by default '[').
598
599        ``rowend`` is the string used to end each row (by default ']').
600
601        ``rowsep`` is the string used to separate rows (by default a newline).
602
603        ``colsep`` is the string used to separate columns (by default ', ').
604
605        ``align`` defines how the elements are aligned. Must be one of 'left',
606        'right', or 'center'.  You can also use '<', '>', and '^' to mean the
607        same thing, respectively.
608
609        This is used by the string printer for Matrix.
610
611        Examples
612        ========
613
614        >>> from diofant.printing.str import StrPrinter
615        >>> M = Matrix([[1, 2], [-33, 4]])
616        >>> printer = StrPrinter()
617        >>> M.table(printer)
618        '[  1, 2]\n[-33, 4]'
619        >>> print(M.table(printer))
620        [  1, 2]
621        [-33, 4]
622        >>> print(M.table(printer, rowsep=',\n'))
623        [  1, 2],
624        [-33, 4]
625        >>> print('[%s]' % M.table(printer, rowsep=',\n'))
626        [[  1, 2],
627        [-33, 4]]
628        >>> print(M.table(printer, colsep=' '))
629        [  1 2]
630        [-33 4]
631        >>> print(M.table(printer, align='center'))
632        [ 1 , 2]
633        [-33, 4]
634        >>> print(M.table(printer, rowstart='{', rowend='}'))
635        {  1, 2}
636        {-33, 4}
637
638        """
639        # Handle zero dimensions:
640        if self.rows == 0 or self.cols == 0:
641            return '[]'
642        # Build table of string representations of the elements
643        res = []
644        # Track per-column max lengths for pretty alignment
645        maxlen = [0] * self.cols
646        for i in range(self.rows):
647            res.append([])
648            for j in range(self.cols):
649                s = printer._print(self[i, j])
650                res[-1].append(s)
651                maxlen[j] = max(len(s), maxlen[j])
652        # Patch strings together
653        align = {
654            'left': 'ljust',
655            'right': 'rjust',
656            'center': 'center',
657            '<': 'ljust',
658            '>': 'rjust',
659            '^': 'center',
660        }[align]
661        for i, row in enumerate(res):
662            for j, elem in enumerate(row):
663                row[j] = getattr(elem, align)(maxlen[j])
664            res[i] = rowstart + colsep.join(row) + rowend
665        return rowsep.join(res)
666
667    def _format_str(self, printer):
668        if self.rows == 0 or self.cols == 0:
669            return f'Matrix({self.rows}, {self.cols}, [])'
670        if self.rows == 1:
671            return 'Matrix([%s])' % self.table(printer, rowsep=',\n')  # noqa: SFS101
672        return 'Matrix([\n%s])' % self.table(printer, rowsep=',\n')  # noqa: SFS101
673
674    def _repr_pretty_(self, p, cycle):
675        from ..printing import pretty
676        p.text(pretty(self))
677
678    def _repr_latex_(self):
679        from ..printing import latex
680        return latex(self, mode='equation')
681
682    def cholesky(self):
683        """Returns the Cholesky decomposition L of a matrix A
684        such that L * L.T = A
685
686        A must be a square, symmetric, positive-definite
687        and non-singular matrix.
688
689        Examples
690        ========
691
692        >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
693        >>> A.cholesky()
694        Matrix([
695        [ 5, 0, 0],
696        [ 3, 3, 0],
697        [-1, 1, 3]])
698        >>> A.cholesky() * A.cholesky().T
699        Matrix([
700        [25, 15, -5],
701        [15, 18,  0],
702        [-5,  0, 11]])
703
704        See Also
705        ========
706
707        LDLdecomposition
708        LUdecomposition
709        QRdecomposition
710
711        """
712        if not self.is_square:
713            raise NonSquareMatrixError('Matrix must be square.')
714        if not self.is_symmetric():
715            raise ValueError('Matrix must be symmetric.')
716        return self._cholesky()
717
718    def LDLdecomposition(self):
719        """Returns the LDL Decomposition (L, D) of matrix A,
720        such that L * D * L.T == A
721        This method eliminates the use of square root.
722        Further this ensures that all the diagonal entries of L are 1.
723        A must be a square, symmetric, positive-definite
724        and non-singular matrix.
725
726        Examples
727        ========
728
729        >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
730        >>> L, D = A.LDLdecomposition()
731        >>> L
732        Matrix([
733        [   1,   0, 0],
734        [ 3/5,   1, 0],
735        [-1/5, 1/3, 1]])
736        >>> D
737        Matrix([
738        [25, 0, 0],
739        [ 0, 9, 0],
740        [ 0, 0, 9]])
741        >>> L * D * L.T * A.inv() == eye(A.rows)
742        True
743
744        See Also
745        ========
746
747        cholesky
748        LUdecomposition
749        QRdecomposition
750
751        """
752        if not self.is_square:
753            raise NonSquareMatrixError('Matrix must be square.')
754        if not self.is_symmetric():
755            raise ValueError('Matrix must be symmetric.')
756        return self._LDLdecomposition()
757
758    def lower_triangular_solve(self, rhs):
759        """Solves Ax = B, where A is a lower triangular matrix.
760
761        See Also
762        ========
763
764        upper_triangular_solve
765        cholesky_solve
766        diagonal_solve
767        LDLsolve
768        LUsolve
769        QRsolve
770        pinv_solve
771
772        """
773        if not self.is_square:
774            raise NonSquareMatrixError('Matrix must be square.')
775        if rhs.rows != self.rows:
776            raise ShapeError('Matrices size mismatch.')
777        if not self.is_lower:
778            raise ValueError('Matrix must be lower triangular.')
779        return self._lower_triangular_solve(rhs)
780
781    def upper_triangular_solve(self, rhs):
782        """Solves Ax = B, where A is an upper triangular matrix.
783
784        See Also
785        ========
786
787        lower_triangular_solve
788        cholesky_solve
789        diagonal_solve
790        LDLsolve
791        LUsolve
792        QRsolve
793        pinv_solve
794
795        """
796        if not self.is_square:
797            raise NonSquareMatrixError('Matrix must be square.')
798        if rhs.rows != self.rows:
799            raise TypeError('Matrix size mismatch.')
800        if not self.is_upper:
801            raise TypeError('Matrix is not upper triangular.')
802        return self._upper_triangular_solve(rhs)
803
804    def cholesky_solve(self, rhs):
805        """Solves Ax = B using Cholesky decomposition,
806        for a general square non-singular matrix.
807        For a non-square matrix with rows > cols,
808        the least squares solution is returned.
809
810        See Also
811        ========
812
813        lower_triangular_solve
814        upper_triangular_solve
815        diagonal_solve
816        LDLsolve
817        LUsolve
818        QRsolve
819        pinv_solve
820
821        """
822        if self.is_symmetric():
823            L = self._cholesky()
824        elif self.rows >= self.cols:
825            L = (self.T*self)._cholesky()
826            rhs = self.T*rhs
827        else:
828            raise NotImplementedError('Under-determined System.')
829        Y = L._lower_triangular_solve(rhs)
830        return (L.T)._upper_triangular_solve(Y)
831
832    def diagonal_solve(self, rhs):
833        """Solves Ax = B efficiently, where A is a diagonal Matrix,
834        with non-zero diagonal entries.
835
836        Examples
837        ========
838
839        >>> A = eye(2)*2
840        >>> B = Matrix([[1, 2], [3, 4]])
841        >>> A.diagonal_solve(B) == B/2
842        True
843
844        See Also
845        ========
846
847        lower_triangular_solve
848        upper_triangular_solve
849        cholesky_solve
850        LDLsolve
851        LUsolve
852        QRsolve
853        pinv_solve
854
855        """
856        if not self.is_diagonal():
857            raise TypeError('Matrix should be diagonal')
858        if rhs.rows != self.rows:
859            raise TypeError('Size mis-match')
860        return self._diagonal_solve(rhs)
861
862    def LDLsolve(self, rhs):
863        """Solves Ax = B using LDL decomposition,
864        for a general square and non-singular matrix.
865
866        For a non-square matrix with rows > cols,
867        the least squares solution is returned.
868
869        Examples
870        ========
871
872        >>> A = eye(2)*2
873        >>> B = Matrix([[1, 2], [3, 4]])
874        >>> A.LDLsolve(B) == B/2
875        True
876
877        See Also
878        ========
879
880        LDLdecomposition
881        lower_triangular_solve
882        upper_triangular_solve
883        cholesky_solve
884        diagonal_solve
885        LUsolve
886        QRsolve
887        pinv_solve
888
889        """
890        if self.is_symmetric():
891            L, D = self.LDLdecomposition()
892        elif self.rows >= self.cols:
893            L, D = (self.T*self).LDLdecomposition()
894            rhs = self.T*rhs
895        else:
896            raise NotImplementedError('Under-determined System.')
897        Y = L._lower_triangular_solve(rhs)
898        Z = D._diagonal_solve(Y)
899        return (L.T)._upper_triangular_solve(Z)
900
901    def solve_least_squares(self, rhs, method='CH'):
902        """Return the least-square fit to the data.
903
904        By default the cholesky_solve routine is used (method='CH'); other
905        methods of matrix inversion can be used.
906
907        Examples
908        ========
909
910        >>> A = Matrix([1, 2, 3])
911        >>> B = Matrix([2, 3, 4])
912        >>> S = Matrix(A.row_join(B))
913        >>> S
914        Matrix([
915        [1, 2],
916        [2, 3],
917        [3, 4]])
918
919        If each line of S represent coefficients of Ax + By
920        and x and y are [2, 3] then S*xy is:
921
922        >>> r = S*Matrix([2, 3])
923        >>> r
924        Matrix([
925        [ 8],
926        [13],
927        [18]])
928
929        But let's add 1 to the middle value and then solve for the
930        least-squares value of xy:
931
932        >>> xy = S.solve_least_squares(Matrix([8, 14, 18]))
933        >>> xy
934        Matrix([
935        [ 5/3],
936        [10/3]])
937
938        The error is given by S*xy - r:
939
940        >>> S*xy - r
941        Matrix([
942        [1/3],
943        [1/3],
944        [1/3]])
945        >>> _.norm().evalf(2)
946        0.58
947
948        If a different xy is used, the norm will be higher:
949
950        >>> xy += ones(2, 1)/10
951        >>> (S*xy - r).norm().evalf(2)
952        1.5
953
954        See Also
955        ========
956
957        inv
958
959        """
960        if method == 'CH':
961            return self.cholesky_solve(rhs)
962        t = self.T
963        return (t*self).inv(method=method)*t*rhs
964
965    def extract(self, rowsList, colsList):
966        """Return a submatrix by specifying a list of rows and columns.
967        Negative indices can be given. All indices must be in the range
968        -n <= i < n where n is the number of rows or columns.
969
970        Examples
971        ========
972
973        >>> m = Matrix(4, 3, range(12))
974        >>> m
975        Matrix([
976        [0,  1,  2],
977        [3,  4,  5],
978        [6,  7,  8],
979        [9, 10, 11]])
980        >>> m.extract([0, 1, 3], [0, 1])
981        Matrix([
982        [0,  1],
983        [3,  4],
984        [9, 10]])
985
986        Rows or columns can be repeated:
987
988        >>> m.extract([0, 0, 1], [-1])
989        Matrix([
990        [2],
991        [2],
992        [5]])
993
994        Every other row can be taken by using range to provide the indices:
995
996        >>> m.extract(range(0, m.rows, 2), [-1])
997        Matrix([
998        [2],
999        [8]])
1000
1001        """
1002        cols = self.cols
1003        flat_list = self._mat
1004        rowsList = [a2idx(k, self.rows) for k in rowsList]
1005        colsList = [a2idx(k, self.cols) for k in colsList]
1006        return self._new(len(rowsList), len(colsList),
1007                         lambda i, j: flat_list[rowsList[i]*cols + colsList[j]])
1008
1009    def key2bounds(self, keys):
1010        """Converts a key with potentially mixed types of keys (integer and slice)
1011        into a tuple of ranges and raises an error if any index is out of self's
1012        range.
1013
1014        See Also
1015        ========
1016
1017        key2ij
1018
1019        """
1020        islice, jslice = [isinstance(k, slice) for k in keys]
1021        if islice:
1022            assert self.rows
1023            rlo, rhi = keys[0].indices(self.rows)[:2]
1024        else:
1025            rlo = a2idx(keys[0], self.rows)
1026            rhi = rlo + 1
1027        if jslice:
1028            assert self.cols
1029            clo, chi = keys[1].indices(self.cols)[:2]
1030        else:
1031            clo = a2idx(keys[1], self.cols)
1032            chi = clo + 1
1033        return rlo, rhi, clo, chi
1034
1035    def key2ij(self, key):
1036        """Converts key into canonical form, converting integers or indexable
1037        items into valid integers for self's range or returning slices
1038        unchanged.
1039
1040        See Also
1041        ========
1042
1043        key2bounds
1044
1045        """
1046        if is_sequence(key):
1047            if not len(key) == 2:
1048                raise TypeError('key must be a sequence of length 2')
1049            return [a2idx(i, n) if not isinstance(i, slice) else i
1050                    for i, n in zip(key, self.shape)]
1051        elif isinstance(key, slice):
1052            return key.indices(len(self))[:2]
1053        else:
1054            return divmod(a2idx(key, len(self)), self.cols)
1055
1056    def evalf(self, dps=15, **options):
1057        """Apply evalf() to each element of self."""
1058        return self.applyfunc(lambda i: i.evalf(dps, **options))
1059
1060    n = evalf
1061
1062    def atoms(self, *types):
1063        """Returns the atoms that form the current object.
1064
1065        Examples
1066        ========
1067
1068        >>> Matrix([[x]])
1069        Matrix([[x]])
1070        >>> _.atoms()
1071        {x}
1072
1073        """
1074        if types:
1075            types = tuple(t if isinstance(t, type) else type(t) for t in types)
1076        else:
1077            types = Atom,
1078        result = set()
1079        for i in self:
1080            result.update(i.atoms(*types))
1081        return result
1082
1083    @property
1084    def free_symbols(self):
1085        """Returns the free symbols within the matrix.
1086
1087        Examples
1088        ========
1089
1090        >>> Matrix([[x], [1]]).free_symbols
1091        {x}
1092
1093        """
1094        return set().union(*[i.free_symbols for i in self])
1095
1096    def subs(self, *args, **kwargs):  # should mirror core.basic.subs
1097        """Return a new matrix with subs applied to each entry.
1098
1099        Examples
1100        ========
1101
1102        >>> SparseMatrix(1, 1, [x])
1103        Matrix([[x]])
1104        >>> _.subs({x: y})
1105        Matrix([[y]])
1106        >>> Matrix(_).subs({y: x})
1107        Matrix([[x]])
1108
1109        """
1110        return self.applyfunc(lambda x: x.subs(*args, **kwargs))
1111
1112    def xreplace(self, rule):  # should mirror core.basic.xreplace
1113        """Return a new matrix with xreplace applied to each entry.
1114
1115        Examples
1116        ========
1117
1118        >>> SparseMatrix(1, 1, [x])
1119        Matrix([[x]])
1120        >>> _.xreplace({x: y})
1121        Matrix([[y]])
1122        >>> Matrix(_).xreplace({y: x})
1123        Matrix([[x]])
1124
1125        """
1126        return self.applyfunc(lambda x: x.xreplace(rule))
1127
1128    def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
1129               mul=True, log=True, multinomial=True, basic=True, **hints):
1130        """Apply core.function.expand to each entry of the matrix.
1131
1132        Examples
1133        ========
1134
1135        >>> Matrix(1, 1, [x*(x+1)])
1136        Matrix([[x*(x + 1)]])
1137        >>> _.expand()
1138        Matrix([[x**2 + x]])
1139
1140        """
1141        return self.applyfunc(lambda x: x.expand(
1142                              deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
1143                              **hints))
1144
1145    def simplify(self, ratio=1.7, measure=count_ops):
1146        """Apply simplify to each element of the matrix.
1147
1148        Examples
1149        ========
1150
1151        >>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
1152        Matrix([[x*sin(y)**2 + x*cos(y)**2]])
1153        >>> _.simplify()
1154        Matrix([[x]])
1155
1156        """
1157        return self.applyfunc(lambda x: x.simplify(ratio, measure))
1158    _eval_simplify = simplify
1159
1160    def print_nonzero(self, symb='X'):
1161        """Shows location of non-zero entries for fast shape lookup.
1162
1163        Examples
1164        ========
1165
1166        >>> m = Matrix(2, 3, lambda i, j: i*3+j)
1167        >>> m
1168        Matrix([
1169        [0, 1, 2],
1170        [3, 4, 5]])
1171        >>> m.print_nonzero()
1172        [ XX]
1173        [XXX]
1174        >>> m = eye(4)
1175        >>> m.print_nonzero('x')
1176        [x   ]
1177        [ x  ]
1178        [  x ]
1179        [   x]
1180
1181        """
1182        s = []
1183        for i in range(self.rows):
1184            line = []
1185            for j in range(self.cols):
1186                if self[i, j] == 0:
1187                    line.append(' ')
1188                else:
1189                    line.append(str(symb))
1190            s.append(f"[{''.join(line)}]")
1191        print('\n'.join(s))
1192
1193    def LUsolve(self, rhs, iszerofunc=_iszero):
1194        """Solve the linear system Ax = rhs for x where A = self.
1195
1196        This is for symbolic matrices, for real or complex ones use
1197        mpmath.lu_solve or mpmath.qr_solve.
1198
1199        See Also
1200        ========
1201
1202        lower_triangular_solve
1203        upper_triangular_solve
1204        cholesky_solve
1205        diagonal_solve
1206        LDLsolve
1207        QRsolve
1208        pinv_solve
1209        LUdecomposition
1210
1211        """
1212        if rhs.rows != self.rows:
1213            raise ShapeError('`self` and `rhs` must have the same number of rows.')
1214
1215        A, perm = self.LUdecomposition_Simple(iszerofunc=_iszero)
1216        n = self.rows
1217        b = rhs.permuteFwd(perm).as_mutable()
1218        # forward substitution, all diag entries are scaled to 1
1219        for i in range(n):
1220            for j in range(i):
1221                scale = A[i, j]
1222                b.zip_row_op(i, j, lambda x, y: x - y*scale)
1223        # backward substitution
1224        for i in range(n - 1, -1, -1):
1225            for j in range(i + 1, n):
1226                scale = A[i, j]
1227                b.zip_row_op(i, j, lambda x, y: x - y*scale)
1228            scale = A[i, i]
1229            b.row_op(i, lambda x, _: x/scale)
1230        return rhs.__class__(b)
1231
1232    def LUdecomposition(self, iszerofunc=_iszero):
1233        """Returns the decomposition LU and the row swaps p.
1234
1235        Examples
1236        ========
1237
1238        >>> a = Matrix([[4, 3], [6, 3]])
1239        >>> L, U, _ = a.LUdecomposition()
1240        >>> L
1241        Matrix([
1242        [  1, 0],
1243        [3/2, 1]])
1244        >>> U
1245        Matrix([
1246        [4,    3],
1247        [0, -3/2]])
1248
1249        See Also
1250        ========
1251
1252        cholesky
1253        LDLdecomposition
1254        QRdecomposition
1255        LUdecomposition_Simple
1256        LUdecompositionFF
1257        LUsolve
1258
1259        """
1260        combined, p = self.LUdecomposition_Simple(iszerofunc=_iszero)
1261        L = self.zeros(self.rows)
1262        U = self.zeros(self.rows)
1263        for i in range(self.rows):
1264            for j in range(self.rows):
1265                if i > j:
1266                    L[i, j] = combined[i, j]
1267                else:
1268                    if i == j:
1269                        L[i, i] = 1
1270                    U[i, j] = combined[i, j]
1271        return L, U, p
1272
1273    def LUdecomposition_Simple(self, iszerofunc=_iszero):
1274        """Returns A comprised of L, U (L's diag entries are 1) and
1275        p which is the list of the row swaps (in order).
1276
1277        See Also
1278        ========
1279
1280        LUdecomposition
1281        LUdecompositionFF
1282        LUsolve
1283
1284        """
1285        if not self.is_square:
1286            raise NonSquareMatrixError('A Matrix must be square to apply LUdecomposition_Simple().')
1287        n = self.rows
1288        A = self.as_mutable()
1289        p = []
1290        # factorization
1291        for j in range(n):
1292            for i in range(j):
1293                for k in range(i):
1294                    A[i, j] = A[i, j] - A[i, k]*A[k, j]
1295            pivot = -1
1296            for i in range(j, n):
1297                for k in range(j):
1298                    A[i, j] = A[i, j] - A[i, k]*A[k, j]
1299                # find the first non-zero pivot, includes any expression
1300                if pivot == -1 and not iszerofunc(A[i, j]):
1301                    pivot = i
1302            if pivot < 0:
1303                # this result is based on iszerofunc's analysis of the possible pivots, so even though
1304                # the element may not be strictly zero, the supplied iszerofunc's evaluation gave True
1305                raise ValueError('No nonzero pivot found; inversion failed.')
1306            if pivot != j:  # row must be swapped
1307                A.row_swap(pivot, j)
1308                p.append([pivot, j])
1309            scale = 1 / A[j, j]
1310            for i in range(j + 1, n):
1311                A[i, j] = A[i, j]*scale
1312        return A, p
1313
1314    def LUdecompositionFF(self):
1315        """Compute a fraction-free LU decomposition.
1316
1317        Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
1318        If the elements of the matrix belong to some integral domain I, then all
1319        elements of L, D and U are guaranteed to belong to I.
1320
1321        **Reference**
1322            - W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
1323              for LU and QR factors". Frontiers in Computer Science in China,
1324              Vol 2, no. 1, pp. 67-80, 2008.
1325
1326        See Also
1327        ========
1328
1329        LUdecomposition
1330        LUdecomposition_Simple
1331        LUsolve
1332
1333        """
1334        from . import SparseMatrix
1335        zeros = SparseMatrix.zeros
1336        eye = SparseMatrix.eye
1337
1338        n, m = self.rows, self.cols
1339        U, L, P = self.as_mutable(), eye(n), eye(n)
1340        DD = zeros(n, n)
1341        oldpivot = 1
1342
1343        for k in range(n - 1):
1344            if U[k, k] == 0:
1345                for kpivot in range(k + 1, n):
1346                    if U[kpivot, k]:
1347                        break
1348                else:
1349                    raise ValueError('Matrix is not full rank')
1350                U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
1351                L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
1352                P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
1353            L[k, k] = Ukk = U[k, k]
1354            DD[k, k] = oldpivot*Ukk
1355            for i in range(k + 1, n):
1356                L[i, k] = Uik = U[i, k]
1357                for j in range(k + 1, m):
1358                    U[i, j] = (Ukk*U[i, j] - U[k, j]*Uik) / oldpivot
1359                U[i, k] = 0
1360            oldpivot = Ukk
1361        DD[n - 1, n - 1] = oldpivot
1362        return P, L, DD, U
1363
1364    def cofactorMatrix(self, method='berkowitz'):
1365        """Return a matrix containing the cofactor of each element.
1366
1367        See Also
1368        ========
1369
1370        cofactor
1371        minorEntry
1372        minorMatrix
1373        adjugate
1374
1375        """
1376        out = self._new(self.rows, self.cols, lambda i, j:
1377                        self.cofactor(i, j, method))
1378        return out
1379
1380    def minorEntry(self, i, j, method='berkowitz'):
1381        """Calculate the minor of an element.
1382
1383        See Also
1384        ========
1385
1386        minorMatrix
1387        cofactor
1388        cofactorMatrix
1389
1390        """
1391        if not 0 <= i < self.rows or not 0 <= j < self.cols:
1392            raise ValueError('`i` and `j` must satisfy 0 <= i < `self.rows` ' +
1393                             f'({self.rows:d})' + f'and 0 <= j < `self.cols` ({self.cols:d}).')
1394        return self.minorMatrix(i, j).det(method)
1395
1396    def minorMatrix(self, i, j):
1397        """Creates the minor matrix of a given element.
1398
1399        See Also
1400        ========
1401
1402        minorEntry
1403        cofactor
1404        cofactorMatrix
1405
1406        """
1407        if not 0 <= i < self.rows or not 0 <= j < self.cols:
1408            raise ValueError('`i` and `j` must satisfy 0 <= i < `self.rows` ' +
1409                             f'({self.rows:d})' + f'and 0 <= j < `self.cols` ({self.cols:d}).')
1410        M = self.as_mutable()
1411        del M[i, :]
1412        del M[:, j]
1413        return self._new(M)
1414
1415    def cofactor(self, i, j, method='berkowitz'):
1416        """Calculate the cofactor of an element.
1417
1418        See Also
1419        ========
1420
1421        cofactorMatrix
1422        minorEntry
1423        minorMatrix
1424
1425        """
1426        if (i + j) % 2 == 0:
1427            return self.minorEntry(i, j, method)
1428        else:
1429            return -1*self.minorEntry(i, j, method)
1430
1431    def jacobian(self, X):
1432        """Calculates the Jacobian matrix (derivative of a vectorial function).
1433
1434        Parameters
1435        ==========
1436
1437        self : vector of expressions representing functions f_i(x_1, ..., x_n).
1438        X : set of x_i's in order, it can be a list or a Matrix
1439
1440        Both self and X can be a row or a column matrix in any order
1441        (i.e., jacobian() should always work).
1442
1443        Examples
1444        ========
1445
1446        >>> from diofant.abc import rho, phi
1447        >>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
1448        >>> Y = Matrix([rho, phi])
1449        >>> X.jacobian(Y)
1450        Matrix([
1451        [cos(phi), -rho*sin(phi)],
1452        [sin(phi),  rho*cos(phi)],
1453        [   2*rho,             0]])
1454        >>> X = Matrix([rho*cos(phi), rho*sin(phi)])
1455        >>> X.jacobian(Y)
1456        Matrix([
1457        [cos(phi), -rho*sin(phi)],
1458        [sin(phi),  rho*cos(phi)]])
1459
1460        See Also
1461        ========
1462
1463        diofant.matrices.dense.hessian
1464        diofant.matrices.dense.wronskian
1465
1466        """
1467        if not isinstance(X, MatrixBase):
1468            X = self._new(X)
1469        # Both X and self can be a row or a column matrix, so we need to make
1470        # sure all valid combinations work, but everything else fails:
1471        if self.shape[0] == 1:
1472            m = self.shape[1]
1473        elif self.shape[1] == 1:
1474            m = self.shape[0]
1475        else:
1476            raise TypeError('self must be a row or a column matrix')
1477        if X.shape[0] == 1:
1478            n = X.shape[1]
1479        elif X.shape[1] == 1:
1480            n = X.shape[0]
1481        else:
1482            raise TypeError('X must be a row or a column matrix')
1483
1484        # m is the number of functions and n is the number of variables
1485        # computing the Jacobian is now easy:
1486        return self._new(m, n, lambda j, i: self[j].diff(X[i]))
1487
1488    def QRdecomposition(self):
1489        """Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.
1490
1491        Examples
1492        ========
1493
1494        This is the example from wikipedia:
1495
1496        >>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
1497        >>> Q, R = A.QRdecomposition()
1498        >>> Q
1499        Matrix([
1500        [ 6/7, -69/175, -58/175],
1501        [ 3/7, 158/175,   6/175],
1502        [-2/7,    6/35,  -33/35]])
1503        >>> R
1504        Matrix([
1505        [14,  21, -14],
1506        [ 0, 175, -70],
1507        [ 0,   0,  35]])
1508        >>> A == Q*R
1509        True
1510
1511        QR factorization of an identity matrix:
1512
1513        >>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
1514        >>> Q, R = A.QRdecomposition()
1515        >>> Q
1516        Matrix([
1517        [1, 0, 0],
1518        [0, 1, 0],
1519        [0, 0, 1]])
1520        >>> R
1521        Matrix([
1522        [1, 0, 0],
1523        [0, 1, 0],
1524        [0, 0, 1]])
1525
1526        See Also
1527        ========
1528
1529        cholesky
1530        LDLdecomposition
1531        LUdecomposition
1532        QRsolve
1533
1534        """
1535        cls = self.__class__
1536        mat = self.as_mutable()
1537
1538        if not mat.rows >= mat.cols:
1539            raise MatrixError(
1540                'The number of rows must be greater than columns')
1541        n = mat.rows
1542        m = mat.cols
1543        rank = n
1544        row_reduced = mat.rref()[0]
1545        for i in range(row_reduced.rows):
1546            if row_reduced[i, :].norm() == 0:
1547                rank -= 1
1548        if not rank == mat.cols:
1549            raise MatrixError('The rank of the matrix must match the columns')
1550        Q, R = mat.zeros(n, m), mat.zeros(m)
1551        for j in range(m):      # for each column vector
1552            tmp = mat[:, j]     # take original v
1553            for i in range(j):
1554                # subtract the project of mat on new vector
1555                tmp -= Q[:, i]*mat[:, j].dot(Q[:, i])
1556                tmp.expand()
1557            # normalize it
1558            R[j, j] = tmp.norm()
1559            Q[:, j] = tmp / R[j, j]
1560            if Q[:, j].norm() != 1:
1561                raise NotImplementedError("Couldn't normalize the "
1562                                          f'vector {j:d}.')
1563            for i in range(j):
1564                R[i, j] = Q[:, i].dot(mat[:, j])
1565        return cls(Q), cls(R)
1566
1567    def QRsolve(self, b):
1568        """Solve the linear system 'Ax = b'.
1569
1570        'self' is the matrix 'A', the method argument is the vector
1571        'b'.  The method returns the solution vector 'x'.  If 'b' is a
1572        matrix, the system is solved for each column of 'b' and the
1573        return value is a matrix of the same shape as 'b'.
1574
1575        This method is slower (approximately by a factor of 2) but
1576        more stable for floating-point arithmetic than the LUsolve method.
1577        However, LUsolve usually uses an exact arithmetic, so you don't need
1578        to use QRsolve.
1579
1580        This is mainly for educational purposes and symbolic matrices, for real
1581        (or complex) matrices use mpmath.qr_solve.
1582
1583        See Also
1584        ========
1585
1586        lower_triangular_solve
1587        upper_triangular_solve
1588        cholesky_solve
1589        diagonal_solve
1590        LDLsolve
1591        LUsolve
1592        pinv_solve
1593        QRdecomposition
1594
1595        """
1596        Q, R = self.as_mutable().QRdecomposition()
1597        y = Q.T*b
1598
1599        # back substitution to solve R*x = y:
1600        # We build up the result "backwards" in the vector 'x' and reverse it
1601        # only in the end.
1602        x = []
1603        n = R.rows
1604        for j in range(n - 1, -1, -1):
1605            tmp = y[j, :]
1606            for k in range(j + 1, n):
1607                tmp -= R[j, k]*x[n - 1 - k]
1608            x.append(tmp / R[j, j])
1609        return self._new([row._mat for row in reversed(x)])
1610
1611    def cross(self, b):
1612        """Return the cross product of `self` and `b` relaxing the condition
1613        of compatible dimensions: if each has 3 elements, a matrix of the
1614        same type and shape as `self` will be returned. If `b` has the same
1615        shape as `self` then common identities for the cross product (like
1616        `a x b = - b x a`) will hold.
1617
1618        See Also
1619        ========
1620
1621        dot
1622        multiply
1623        multiply_elementwise
1624
1625        """
1626        if not is_sequence(b):
1627            raise TypeError(f'`b` must be an ordered iterable or Matrix, not {type(b)}.')
1628        if not self.rows * self.cols == b.rows * b.cols == 3:
1629            raise ShapeError('Dimensions incorrect for cross product.')
1630        else:
1631            return self._new(self.rows, self.cols, (
1632                (self[1]*b[2] - self[2]*b[1]),
1633                (self[2]*b[0] - self[0]*b[2]),
1634                (self[0]*b[1] - self[1]*b[0])))
1635
1636    def dot(self, b):
1637        """Return the dot product of Matrix self and b relaxing the condition
1638        of compatible dimensions: if either the number of rows or columns are
1639        the same as the length of b then the dot product is returned. If self
1640        is a row or column vector, a scalar is returned. Otherwise, a list
1641        of results is returned (and in that case the number of columns in self
1642        must match the length of b).
1643
1644        Examples
1645        ========
1646
1647        >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1648        >>> v = [1, 1, 1]
1649        >>> M[0, :].dot(v)
1650        6
1651        >>> M[:, 0].dot(v)
1652        12
1653        >>> M.dot(v)
1654        [6, 15, 24]
1655
1656        See Also
1657        ========
1658
1659        cross
1660        multiply
1661        multiply_elementwise
1662
1663        """
1664        from . import Matrix
1665
1666        if not isinstance(b, MatrixBase):
1667            if is_sequence(b):
1668                if len(b) != self.cols and len(b) != self.rows:
1669                    raise ShapeError('Dimensions incorrect for dot product.')
1670                return self.dot(Matrix(b))
1671            else:
1672                raise TypeError(f'`b` must be an ordered iterable or Matrix, not {type(b)}')
1673
1674        mat = self
1675        if mat.cols == b.rows:
1676            if b.cols != 1:
1677                mat = mat.T
1678                b = b.T
1679            prod = flatten((mat*b).tolist())
1680            if len(prod) == 1:
1681                return prod[0]
1682            return prod
1683        if mat.cols == b.cols:
1684            return mat.dot(b.T)
1685        elif mat.rows == b.rows:
1686            return mat.T.dot(b)
1687        else:
1688            raise ShapeError('Dimensions incorrect for dot product.')
1689
1690    def multiply_elementwise(self, b):
1691        """Return the Hadamard product (elementwise product) of A and B
1692
1693        Examples
1694        ========
1695
1696        >>> A = Matrix([[0, 1, 2], [3, 4, 5]])
1697        >>> B = Matrix([[1, 10, 100], [100, 10, 1]])
1698        >>> A.multiply_elementwise(B)
1699        Matrix([
1700        [  0, 10, 200],
1701        [300, 40,   5]])
1702
1703        See Also
1704        ========
1705
1706        cross
1707        dot
1708        multiply
1709
1710        """
1711        from . import matrix_multiply_elementwise
1712
1713        return matrix_multiply_elementwise(self, b)
1714
1715    def values(self):
1716        """Return non-zero values of self."""
1717        return [i for i in flatten(self.tolist()) if not i.is_zero]
1718
1719    def norm(self, ord=None):
1720        """Return the Norm of a Matrix or Vector.
1721        In the simplest case this is the geometric size of the vector
1722        Other norms can be specified by the ord parameter
1723
1724
1725        =====  ============================  ==========================
1726        ord    norm for matrices             norm for vectors
1727        =====  ============================  ==========================
1728        None   Frobenius norm                2-norm
1729        'fro'  Frobenius norm                - does not exist
1730        inf    --                            max(abs(x))
1731        -inf   --                            min(abs(x))
1732        1      --                            as below
1733        -1     --                            as below
1734        2      2-norm (largest sing. value)  as below
1735        -2     smallest singular value       as below
1736        other  - does not exist              sum(abs(x)**ord)**(1./ord)
1737        =====  ============================  ==========================
1738
1739        Examples
1740        ========
1741
1742        >>> x = Symbol('x', real=True)
1743        >>> v = Matrix([cos(x), sin(x)])
1744        >>> trigsimp(v.norm())
1745        1
1746        >>> v.norm(10)
1747        (sin(x)**10 + cos(x)**10)**(1/10)
1748        >>> A = Matrix([[1, 1], [1, 1]])
1749        >>> A.norm(2)  # Spectral norm (max of |Ax|/|x| under 2-vector-norm)
1750        2
1751        >>> A.norm(-2)  # Inverse spectral norm (smallest singular value)
1752        0
1753        >>> A.norm()  # Frobenius Norm
1754        2
1755        >>> Matrix([1, -2]).norm(oo)
1756        2
1757        >>> Matrix([-1, 2]).norm(-oo)
1758        1
1759
1760        See Also
1761        ========
1762
1763        normalized
1764
1765        """
1766        # Row or Column Vector Norms
1767        vals = list(self.values()) or [0]
1768        if self.rows == 1 or self.cols == 1:
1769            if ord == 2 or ord is None:  # Common case sqrt(<x, x>)
1770                return sqrt(Add(*(abs(i)**2 for i in vals)))
1771
1772            elif ord == 1:  # sum(abs(x))
1773                return Add(*(abs(i) for i in vals))
1774
1775            elif ord == oo:  # max(abs(x))
1776                return Max(*[abs(i) for i in vals])
1777
1778            elif ord == -oo:  # min(abs(x))
1779                return Min(*[abs(i) for i in vals])
1780
1781            # Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
1782            # Note that while useful this is not mathematically a norm
1783            return Pow(Add(*(abs(i)**ord for i in vals)), Integer(1) / ord)
1784
1785        # Matrix Norms
1786        else:
1787            if ord == 2:  # Spectral Norm
1788                # Maximum singular value
1789                return Max(*self.singular_values())
1790
1791            elif ord == -2:
1792                # Minimum singular value
1793                return Min(*self.singular_values())
1794
1795            elif (ord is None or isinstance(ord, str) and ord.lower() in
1796                    ['f', 'fro', 'frobenius', 'vector']):
1797                # Reshape as vector and send back to norm function
1798                return self.vec().norm(ord=2)
1799
1800            else:
1801                raise NotImplementedError('Matrix Norms under development')
1802
1803    def normalized(self):
1804        """Return the normalized version of ``self``.
1805
1806        See Also
1807        ========
1808
1809        norm
1810
1811        """
1812        if self.rows != 1 and self.cols != 1:
1813            raise ShapeError('A Matrix must be a vector to normalize.')
1814        norm = self.norm()
1815        out = self.applyfunc(lambda i: i / norm)
1816        return out
1817
1818    def project(self, v):
1819        """Return the projection of ``self`` onto the line containing ``v``.
1820
1821        Examples
1822        ========
1823
1824        >>> V = Matrix([sqrt(3)/2, Rational(1, 2)])
1825        >>> x = Matrix([[1, 0]])
1826        >>> V.project(x)
1827        Matrix([[sqrt(3)/2, 0]])
1828        >>> V.project(-x)
1829        Matrix([[sqrt(3)/2, 0]])
1830
1831        """
1832        return v*(self.dot(v) / v.dot(v))
1833
1834    def permuteBkwd(self, perm):
1835        """Permute the rows of the matrix with the given permutation in reverse.
1836
1837        Examples
1838        ========
1839
1840        >>> M = eye(3)
1841        >>> M.permuteBkwd([[0, 1], [0, 2]])
1842        Matrix([
1843        [0, 1, 0],
1844        [0, 0, 1],
1845        [1, 0, 0]])
1846
1847        See Also
1848        ========
1849
1850        permuteFwd
1851
1852        """
1853        copy = self.copy()
1854        for i in range(len(perm) - 1, -1, -1):
1855            copy.row_swap(perm[i][0], perm[i][1])
1856        return copy
1857
1858    def permuteFwd(self, perm):
1859        """Permute the rows of the matrix with the given permutation.
1860
1861        Examples
1862        ========
1863
1864        >>> M = eye(3)
1865        >>> M.permuteFwd([[0, 1], [0, 2]])
1866        Matrix([
1867        [0, 0, 1],
1868        [1, 0, 0],
1869        [0, 1, 0]])
1870
1871        See Also
1872        ========
1873
1874        permuteBkwd
1875
1876        """
1877        copy = self.copy()
1878        for p in perm:
1879            copy.row_swap(p[0], p[1])
1880        return copy
1881
1882    def exp(self):
1883        """Return the exponentiation of a square matrix."""
1884        if not self.is_square:
1885            raise NonSquareMatrixError('Exponentiation is valid only '
1886                                       'for square matrices')
1887        cells, P = self.jordan_cells()
1888
1889        def _jblock_exponential(b):
1890            # This function computes the matrix exponential for one single Jordan block
1891            nr = b.rows
1892            l = b[0, 0]
1893            if nr == 1:
1894                res = exp(l)
1895            else:
1896                from . import eye
1897
1898                # extract the diagonal part
1899                d = b[0, 0]*eye(nr)
1900                # and the nilpotent part
1901                n = b-d
1902                # compute its exponential
1903                nex = eye(nr)
1904                for i in range(1, nr):
1905                    nex = nex+n**i/factorial(i)
1906                # combine the two parts
1907                res = exp(b[0, 0])*nex
1908            return res
1909
1910        blocks = list(map(_jblock_exponential, cells))
1911        from . import diag
1912        eJ = diag(* blocks)
1913        # n = self.rows
1914        ret = P*eJ*P.inv()
1915        return type(self)(ret)
1916
1917    @property
1918    def is_square(self):
1919        """Checks if a matrix is square.
1920
1921        A matrix is square if the number of rows equals the number of columns.
1922        The empty matrix is square by definition, since the number of rows and
1923        the number of columns are both zero.
1924
1925        Examples
1926        ========
1927
1928        >>> a = Matrix([[1, 2, 3], [4, 5, 6]])
1929        >>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
1930        >>> c = Matrix([])
1931        >>> a.is_square
1932        False
1933        >>> b.is_square
1934        True
1935        >>> c.is_square
1936        True
1937
1938        """
1939        return self.rows == self.cols
1940
1941    @property
1942    def is_zero(self):
1943        """Checks if a matrix is a zero matrix.
1944
1945        A matrix is zero if every element is zero.  A matrix need not be square
1946        to be considered zero.  The empty matrix is zero by the principle of
1947        vacuous truth.  For a matrix that may or may not be zero (e.g.
1948        contains a symbol), this will be None
1949
1950        Examples
1951        ========
1952
1953        >>> a = Matrix([[0, 0], [0, 0]])
1954        >>> b = zeros(3, 4)
1955        >>> c = Matrix([[0, 1], [0, 0]])
1956        >>> d = Matrix([])
1957        >>> e = Matrix([[x, 0], [0, 0]])
1958        >>> a.is_zero
1959        True
1960        >>> b.is_zero
1961        True
1962        >>> c.is_zero
1963        False
1964        >>> d.is_zero
1965        True
1966        >>> e.is_zero
1967
1968        """
1969        if any(i.is_nonzero for i in self):
1970            return False
1971        if not any(i.is_zero is None for i in self):
1972            return True
1973
1974    def is_nilpotent(self):
1975        """Checks if a matrix is nilpotent.
1976
1977        A matrix B is nilpotent if for some integer k, B**k is
1978        a zero matrix.
1979
1980        Examples
1981        ========
1982
1983        >>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
1984        >>> a.is_nilpotent()
1985        True
1986
1987        >>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
1988        >>> a.is_nilpotent()
1989        False
1990
1991        """
1992        if not self:
1993            return True
1994        if not self.is_square:
1995            raise NonSquareMatrixError(
1996                'Nilpotency is valid only for square matrices')
1997        x = Dummy('x')
1998        if self.charpoly(x).args[0] == x**self.rows:
1999            return True
2000        return False
2001
2002    @property
2003    def is_upper(self):
2004        """Check if matrix is an upper triangular matrix. True can be returned
2005        even if the matrix is not square.
2006
2007        Examples
2008        ========
2009
2010        >>> m = Matrix(2, 2, [1, 0, 0, 1])
2011        >>> m
2012        Matrix([
2013        [1, 0],
2014        [0, 1]])
2015        >>> m.is_upper
2016        True
2017
2018        >>> m = Matrix(4, 3, [5, 1, 9, 0, 4, 6, 0, 0, 5, 0, 0, 0])
2019        >>> m
2020        Matrix([
2021        [5, 1, 9],
2022        [0, 4, 6],
2023        [0, 0, 5],
2024        [0, 0, 0]])
2025        >>> m.is_upper
2026        True
2027
2028        >>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
2029        >>> m
2030        Matrix([
2031        [4, 2, 5],
2032        [6, 1, 1]])
2033        >>> m.is_upper
2034        False
2035
2036        See Also
2037        ========
2038
2039        is_lower
2040        is_diagonal
2041        is_upper_hessenberg
2042
2043        """
2044        return all(self[i, j].is_zero
2045                   for i in range(1, self.rows)
2046                   for j in range(min(i, self.cols)))
2047
2048    @property
2049    def is_lower(self):
2050        """Check if matrix is a lower triangular matrix. True can be returned
2051        even if the matrix is not square.
2052
2053        Examples
2054        ========
2055
2056        >>> m = Matrix(2, 2, [1, 0, 0, 1])
2057        >>> m
2058        Matrix([
2059        [1, 0],
2060        [0, 1]])
2061        >>> m.is_lower
2062        True
2063
2064        >>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4, 0, 6, 6, 5])
2065        >>> m
2066        Matrix([
2067        [0, 0, 0],
2068        [2, 0, 0],
2069        [1, 4, 0],
2070        [6, 6, 5]])
2071        >>> m.is_lower
2072        True
2073
2074        >>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
2075        >>> m
2076        Matrix([
2077        [x**2 + y, x + y**2],
2078        [       0,    x + y]])
2079        >>> m.is_lower
2080        False
2081
2082        See Also
2083        ========
2084
2085        is_upper
2086        is_diagonal
2087        is_lower_hessenberg
2088
2089        """
2090        return all(self[i, j].is_zero
2091                   for i in range(self.rows)
2092                   for j in range(i + 1, self.cols))
2093
2094    @property
2095    def is_hermitian(self):
2096        """Checks if the matrix is Hermitian.
2097
2098        In a Hermitian matrix element i,j is the complex conjugate of
2099        element j,i.
2100
2101        Examples
2102        ========
2103
2104        >>> a = Matrix([[1, I], [-I, 1]])
2105        >>> a
2106        Matrix([
2107        [ 1, I],
2108        [-I, 1]])
2109        >>> a.is_hermitian
2110        True
2111        >>> a[0, 0] = 2*I
2112        >>> a.is_hermitian
2113        False
2114        >>> a[0, 0] = x
2115        >>> a.is_hermitian
2116        >>> a[0, 1] = a[1, 0]*I
2117        >>> a.is_hermitian
2118        False
2119
2120        """
2121        def cond():
2122            yield self.is_square
2123            yield fuzzy_and(
2124                self[i, i].is_extended_real for i in range(self.rows))
2125            yield fuzzy_and(
2126                (self[i, j] - self[j, i].conjugate()).is_zero
2127                for i in range(self.rows)
2128                for j in range(i + 1, self.cols))
2129        return fuzzy_and(i for i in cond())
2130
2131    @property
2132    def is_upper_hessenberg(self):
2133        """Checks if the matrix is the upper-Hessenberg form.
2134
2135        The upper hessenberg matrix has zero entries
2136        below the first subdiagonal.
2137
2138        Examples
2139        ========
2140
2141        >>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
2142        >>> a
2143        Matrix([
2144        [1, 4, 2, 3],
2145        [3, 4, 1, 7],
2146        [0, 2, 3, 4],
2147        [0, 0, 1, 3]])
2148        >>> a.is_upper_hessenberg
2149        True
2150
2151        See Also
2152        ========
2153
2154        is_lower_hessenberg
2155        is_upper
2156
2157        """
2158        return all(self[i, j].is_zero
2159                   for i in range(2, self.rows)
2160                   for j in range(min(self.cols, i - 1)))
2161
2162    @property
2163    def is_lower_hessenberg(self):
2164        r"""Checks if the matrix is in the lower-Hessenberg form.
2165
2166        The lower hessenberg matrix has zero entries
2167        above the first superdiagonal.
2168
2169        Examples
2170        ========
2171
2172        >>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
2173        >>> a
2174        Matrix([
2175        [1, 2, 0, 0],
2176        [5, 2, 3, 0],
2177        [3, 4, 3, 7],
2178        [5, 6, 1, 1]])
2179        >>> a.is_lower_hessenberg
2180        True
2181
2182        See Also
2183        ========
2184
2185        is_upper_hessenberg
2186        is_lower
2187
2188        """
2189        return all(self[i, j].is_zero
2190                   for i in range(self.rows)
2191                   for j in range(i + 2, self.cols))
2192
2193    def is_symbolic(self):
2194        """Checks if any elements contain Symbols.
2195
2196        Examples
2197        ========
2198
2199        >>> M = Matrix([[x, y], [1, 0]])
2200        >>> M.is_symbolic()
2201        True
2202
2203        """
2204        return any(element.has(Symbol) for element in self.values())
2205
2206    def is_symmetric(self, simplify=True):
2207        """Check if matrix is symmetric matrix,
2208        that is square matrix and is equal to its transpose.
2209
2210        By default, simplifications occur before testing symmetry.
2211        They can be skipped using 'simplify=False'; while speeding things a bit,
2212        this may however induce false negatives.
2213
2214        Examples
2215        ========
2216
2217        >>> m = Matrix(2, 2, [0, 1, 1, 2])
2218        >>> m
2219        Matrix([
2220        [0, 1],
2221        [1, 2]])
2222        >>> m.is_symmetric()
2223        True
2224
2225        >>> m = Matrix(2, 2, [0, 1, 2, 0])
2226        >>> m
2227        Matrix([
2228        [0, 1],
2229        [2, 0]])
2230        >>> m.is_symmetric()
2231        False
2232
2233        >>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
2234        >>> m
2235        Matrix([
2236        [0, 0, 0],
2237        [0, 0, 0]])
2238        >>> m.is_symmetric()
2239        False
2240
2241        >>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
2242        >>> m
2243        Matrix([
2244        [         1, x**2 + 2*x + 1, y],
2245        [(x + 1)**2,              2, 0],
2246        [         y,              0, 3]])
2247        >>> m.is_symmetric()
2248        True
2249
2250        If the matrix is already simplified, you may speed-up is_symmetric()
2251        test by using 'simplify=False'.
2252
2253        >>> m.is_symmetric(simplify=False)
2254        False
2255        >>> m1 = m.expand()
2256        >>> m1.is_symmetric(simplify=False)
2257        True
2258
2259        """
2260        if not self.is_square:
2261            return False
2262        if simplify:
2263            delta = self - self.transpose()
2264            delta.simplify()
2265            return delta.equals(self.zeros(self.rows, self.cols))
2266        else:
2267            return self == self.transpose()
2268
2269    def is_anti_symmetric(self, simplify=True):
2270        """Check if matrix M is an antisymmetric matrix,
2271        that is, M is a square matrix with all M[i, j] == -M[j, i].
2272
2273        When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
2274        simplified before testing to see if it is zero. By default,
2275        the Diofant simplify function is used. To use a custom function
2276        set simplify to a function that accepts a single argument which
2277        returns a simplified expression. To skip simplification, set
2278        simplify to False but note that although this will be faster,
2279        it may induce false negatives.
2280
2281        Examples
2282        ========
2283
2284        >>> m = Matrix(2, 2, [0, 1, -1, 0])
2285        >>> m
2286        Matrix([
2287        [ 0, 1],
2288        [-1, 0]])
2289        >>> m.is_anti_symmetric()
2290        True
2291        >>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
2292        >>> m
2293        Matrix([
2294        [ 0, 0, x],
2295        [-y, 0, 0]])
2296        >>> m.is_anti_symmetric()
2297        False
2298
2299        >>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
2300        ...                   -(x + 1)**2, 0, x*y,
2301        ...                   -y, -x*y, 0])
2302
2303        Simplification of matrix elements is done by default so even
2304        though two elements which should be equal and opposite wouldn't
2305        pass an equality test, the matrix is still reported as
2306        anti-symmetric:
2307
2308        >>> m[0, 1] == -m[1, 0]
2309        False
2310        >>> m.is_anti_symmetric()
2311        True
2312
2313        If 'simplify=False' is used for the case when a Matrix is already
2314        simplified, this will speed things up. Here, we see that without
2315        simplification the matrix does not appear anti-symmetric:
2316
2317        >>> m.is_anti_symmetric(simplify=False)
2318        False
2319
2320        But if the matrix were already expanded, then it would appear
2321        anti-symmetric and simplification in the is_anti_symmetric routine
2322        is not needed:
2323
2324        >>> m = m.expand()
2325        >>> m.is_anti_symmetric(simplify=False)
2326        True
2327
2328        """
2329        # accept custom simplification
2330        simpfunc = simplify if isinstance(simplify, FunctionType) else \
2331            _simplify if simplify else False
2332
2333        if not self.is_square:
2334            return False
2335        n = self.rows
2336        if simplify:
2337            for i in range(n):
2338                # diagonal
2339                if not simpfunc(self[i, i]).is_zero:
2340                    return False
2341                # others
2342                for j in range(i + 1, n):
2343                    diff = self[i, j] + self[j, i]
2344                    if not simpfunc(diff).is_zero:
2345                        return False
2346            return True
2347        else:
2348            for i in range(n):
2349                for j in range(i, n):
2350                    if self[i, j] != -self[j, i]:
2351                        return False
2352            return True
2353
2354    def is_diagonal(self):
2355        """Check if matrix is diagonal,
2356        that is matrix in which the entries outside the main diagonal are all zero.
2357
2358        Examples
2359        ========
2360
2361        >>> m = Matrix(2, 2, [1, 0, 0, 2])
2362        >>> m
2363        Matrix([
2364        [1, 0],
2365        [0, 2]])
2366        >>> m.is_diagonal()
2367        True
2368
2369        >>> m = Matrix(2, 2, [1, 1, 0, 2])
2370        >>> m
2371        Matrix([
2372        [1, 1],
2373        [0, 2]])
2374        >>> m.is_diagonal()
2375        False
2376
2377        >>> m = diag(1, 2, 3)
2378        >>> m
2379        Matrix([
2380        [1, 0, 0],
2381        [0, 2, 0],
2382        [0, 0, 3]])
2383        >>> m.is_diagonal()
2384        True
2385
2386        See Also
2387        ========
2388
2389        is_lower
2390        is_upper
2391        is_diagonalizable
2392        diagonalize
2393
2394        """
2395        for i in range(self.rows):
2396            for j in range(self.cols):
2397                if i != j and self[i, j]:
2398                    return False
2399        return True
2400
2401    def det(self, method='bareiss'):
2402        """Computes the matrix determinant using the method "method".
2403
2404        Possible values for "method":
2405          bareiss ... det_bareis
2406          berkowitz ... berkowitz_det
2407          det_LU ... det_LU_decomposition
2408
2409        See Also
2410        ========
2411
2412        det_bareiss
2413        berkowitz_det
2414        det_LU_decomposition
2415
2416        """
2417        # if methods were made internal and all determinant calculations
2418        # passed through here, then these lines could be factored out of
2419        # the method routines
2420        if not self.is_square:
2421            raise NonSquareMatrixError()
2422        if not self:
2423            return Integer(1)
2424        if method == 'bareiss':
2425            return self.det_bareiss()
2426        elif method == 'berkowitz':
2427            return self.berkowitz_det()
2428        elif method == 'det_LU':
2429            return self.det_LU_decomposition()
2430        else:
2431            raise ValueError(f"Determinant method '{method}' unrecognized")
2432
2433    def det_bareiss(self):
2434        """Compute matrix determinant using Bareiss' fraction-free
2435        algorithm which is an extension of the well known Gaussian
2436        elimination method. This approach is best suited for dense
2437        symbolic matrices and will result in a determinant with
2438        minimal number of fractions. It means that less term
2439        rewriting is needed on resulting formulae.
2440
2441        TODO: Implement algorithm for sparse matrices (SFF),
2442        Hong R. Lee, B.David Saunders, Fraction Free Gaussian Elimination
2443        for Sparse Matrices, In Journal of Symbolic Computation, Volume 19,
2444        Issue 5, 1995, Pages 393-402, ISSN 0747-7171,
2445        https://www.sciencedirect.com/science/article/pii/S074771718571022X.
2446
2447        See Also
2448        ========
2449
2450        det
2451        berkowitz_det
2452
2453        """
2454        if not self.is_square:
2455            raise NonSquareMatrixError()
2456        if not self:
2457            return Integer(1)
2458
2459        M, n = self.copy().as_mutable(), self.rows
2460
2461        if n == 1:
2462            det = M[0, 0]
2463        elif n == 2:
2464            det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
2465        elif n == 3:
2466            det = (M[0, 0]*M[1, 1]*M[2, 2] + M[0, 1]*M[1, 2]*M[2, 0] + M[0, 2]*M[1, 0]*M[2, 1]) - \
2467                  (M[0, 2]*M[1, 1]*M[2, 0] + M[0, 0]*M[1, 2]*M[2, 1] + M[0, 1]*M[1, 0]*M[2, 2])
2468        else:
2469            sign = 1  # track current sign in case of column swap
2470
2471            for k in range(n - 1):
2472                # look for a pivot in the current column
2473                # and assume det == 0 if none is found
2474                if M[k, k] == 0:
2475                    for i in range(k + 1, n):
2476                        if M[i, k]:
2477                            M.row_swap(i, k)
2478                            sign *= -1
2479                            break
2480                    else:
2481                        return Integer(0)
2482
2483                # proceed with Bareiss' fraction-free (FF)
2484                # form of Gaussian elimination algorithm
2485                for i in range(k + 1, n):
2486                    for j in range(k + 1, n):
2487                        D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
2488
2489                        if k > 0:
2490                            D /= M[k - 1, k - 1]
2491
2492                        if D.is_Atom:
2493                            M[i, j] = D
2494                        else:
2495                            M[i, j] = cancel(D)
2496
2497            det = sign*M[n - 1, n - 1]
2498
2499        return det.expand()
2500
2501    def det_LU_decomposition(self):
2502        """Compute matrix determinant using LU decomposition
2503
2504        Note that this method fails if the LU decomposition itself
2505        fails. In particular, if the matrix has no inverse this method
2506        will fail.
2507
2508        TODO: Implement algorithm for sparse matrices (SFF),
2509        Hong R. Lee, B.David Saunders, Fraction Free Gaussian Elimination
2510        for Sparse Matrices, In Journal of Symbolic Computation, Volume 19,
2511        Issue 5, 1995, Pages 393-402, ISSN 0747-7171,
2512        https://www.sciencedirect.com/science/article/pii/S074771718571022X.
2513
2514        See Also
2515        ========
2516
2517        det
2518        det_bareiss
2519        berkowitz_det
2520
2521        """
2522        if not self.is_square:
2523            raise NonSquareMatrixError()
2524        if not self:
2525            return Integer(1)
2526
2527        M, n = self.copy(), self.rows
2528        p, prod = [], 1
2529        l, u, p = M.LUdecomposition()
2530        if len(p) % 2:
2531            prod = -1
2532
2533        for k in range(n):
2534            prod = prod*u[k, k]*l[k, k]
2535
2536        return prod.expand()
2537
2538    def adjugate(self, method='berkowitz'):
2539        """Returns the adjugate matrix.
2540
2541        Adjugate matrix is the transpose of the cofactor matrix.
2542
2543        https://en.wikipedia.org/wiki/Adjugate
2544
2545        See Also
2546        ========
2547
2548        cofactorMatrix
2549        transpose
2550        berkowitz
2551
2552        """
2553        return self.cofactorMatrix(method).T
2554
2555    def inverse_LU(self, iszerofunc=_iszero):
2556        """Calculates the inverse using LU decomposition.
2557
2558        See Also
2559        ========
2560
2561        diofant.matrices.matrices.MatrixBase.inv
2562        inverse_GE
2563        inverse_ADJ
2564
2565        """
2566        if not self.is_square:
2567            raise NonSquareMatrixError()
2568
2569        ok = self.rref(simplify=True)[0]
2570        if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
2571            raise ValueError('Matrix det == 0; not invertible.')
2572
2573        return self.LUsolve(self.eye(self.rows), iszerofunc=_iszero)
2574
2575    def inverse_GE(self, iszerofunc=_iszero):
2576        """Calculates the inverse using Gaussian elimination.
2577
2578        See Also
2579        ========
2580
2581        diofant.matrices.matrices.MatrixBase.inv
2582        inverse_LU
2583        inverse_ADJ
2584
2585        """
2586        from . import Matrix
2587
2588        if not self.is_square:
2589            raise NonSquareMatrixError('A Matrix must be square to invert.')
2590
2591        big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
2592        red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
2593        if any(iszerofunc(red[j, j]) for j in range(red.rows)):
2594            raise ValueError('Matrix det == 0; not invertible.')
2595
2596        return self._new(red[:, big.rows:])
2597
2598    def inverse_ADJ(self, iszerofunc=_iszero):
2599        """Calculates the inverse using the adjugate matrix and a determinant.
2600
2601        See Also
2602        ========
2603
2604        diofant.matrices.matrices.MatrixBase.inv
2605        inverse_LU
2606        inverse_GE
2607
2608        """
2609        if not self.is_square:
2610            raise NonSquareMatrixError('A Matrix must be square to invert.')
2611
2612        d = self.berkowitz_det()
2613        zero = d.equals(0)
2614        if zero:
2615            raise ValueError('Matrix det == 0; not invertible.')
2616
2617        return self.adjugate() / d
2618
2619    def rref(self, iszerofunc=_iszero, simplify=False):
2620        """Return reduced row-echelon form of matrix and indices of pivot vars.
2621
2622        To simplify elements before finding nonzero pivots set simplify=True
2623        (to use the default Diofant simplify function) or pass a custom
2624        simplify function.
2625
2626        Examples
2627        ========
2628
2629        >>> m = Matrix([[1, 2], [x, 1 - 1/x]])
2630        >>> m.rref()
2631        (Matrix([
2632        [1, 0],
2633        [0, 1]]), [0, 1])
2634
2635        """
2636        simpfunc = simplify if isinstance(
2637            simplify, FunctionType) else _simplify
2638        # pivot: index of next row to contain a pivot
2639        pivot, r = 0, self.as_mutable()
2640        # pivotlist: indices of pivot variables (non-free)
2641        pivotlist = []
2642        for i in range(r.cols):
2643            if pivot == r.rows:
2644                break
2645            if simplify:
2646                r[pivot, i] = simpfunc(r[pivot, i])
2647            if iszerofunc(r[pivot, i]):
2648                for k in range(pivot, r.rows):
2649                    if simplify and k > pivot:
2650                        r[k, i] = simpfunc(r[k, i])
2651                    if not iszerofunc(r[k, i]):
2652                        r.row_swap(pivot, k)
2653                        break
2654                else:
2655                    continue
2656            scale = r[pivot, i]
2657            r.row_op(pivot, lambda x, _: x / scale)
2658            for j in range(r.rows):
2659                if j == pivot:
2660                    continue
2661                scale = r[j, i]
2662                r.zip_row_op(j, pivot, lambda x, y: x - y*scale)
2663            pivotlist.append(i)
2664            pivot += 1
2665        return self._new(r), pivotlist
2666
2667    def rank(self, iszerofunc=_iszero, simplify=False):
2668        """
2669        Returns the rank of a matrix
2670
2671        >>> m = Matrix([[1, 2], [x, 1 - 1/x]])
2672        >>> m.rank()
2673        2
2674        >>> n = Matrix(3, 3, range(1, 10))
2675        >>> n.rank()
2676        2
2677
2678        """
2679        row_reduced = self.rref(iszerofunc=iszerofunc, simplify=simplify)
2680        rank = len(row_reduced[-1])
2681        return rank
2682
2683    def nullspace(self, simplify=False, iszerofunc=_iszero):
2684        """Returns list of vectors (Matrix objects) that span nullspace of self."""
2685        from . import zeros
2686
2687        simpfunc = simplify if isinstance(
2688            simplify, FunctionType) else _simplify
2689        reduced, pivots = self.rref(simplify=simpfunc, iszerofunc=iszerofunc)
2690
2691        basis = []
2692        # create a set of vectors for the basis
2693        for i in range(self.cols - len(pivots)):
2694            basis.append(zeros(self.cols, 1))
2695        # contains the variable index to which the vector corresponds
2696        basiskey, cur = [-1]*len(basis), 0
2697        for i in range(self.cols):
2698            if i not in pivots:
2699                basiskey[cur] = i
2700                cur += 1
2701        for i in range(self.cols):
2702            if i not in pivots:  # free var, just set vector's ith place to 1
2703                basis[basiskey.index(i)][i, 0] = 1
2704            else:               # add negative of nonpivot entry to corr vector
2705                for j in range(i + 1, self.cols):
2706                    line = pivots.index(i)
2707                    v = reduced[line, j]
2708                    if simplify:
2709                        v = simpfunc(v)
2710                    if v:
2711                        if j in pivots:
2712                            # XXX: Is this the correct error?
2713                            raise NotImplementedError("Couldn't compute the "
2714                                                      'nullspace of `self`.')
2715                        basis[basiskey.index(j)][i, 0] = -v
2716        return [self._new(b) for b in basis]
2717
2718    def berkowitz(self):
2719        """The Berkowitz algorithm.
2720
2721           Given N x N matrix with symbolic content, compute efficiently
2722           coefficients of characteristic polynomials of 'self' and all
2723           its square sub-matrices composed by removing both i-th row
2724           and column, without division in the ground domain.
2725
2726           This method is particularly useful for computing determinant,
2727           principal minors and characteristic polynomial, when 'self'
2728           has complicated coefficients e.g. polynomials. Semi-direct
2729           usage of this algorithm is also important in computing
2730           efficiently sub-resultant PRS.
2731
2732           Assuming that M is a square matrix of dimension N x N and
2733           I is N x N identity matrix,  then the following following
2734           definition of characteristic polynomial is begin used:
2735
2736                          charpoly(M) = det(t*I - M)
2737
2738           As a consequence, all polynomials generated by Berkowitz
2739           algorithm are monic.
2740
2741           >>> M = Matrix([[x, y, z], [1, 0, 0], [y, z, x]])
2742
2743           >>> p, q, r = M.berkowitz()
2744
2745           >>> p  # 1 x 1 M's sub-matrix
2746           (1, -x)
2747
2748           >>> q  # 2 x 2 M's sub-matrix
2749           (1, -x, -y)
2750
2751           >>> r  # 3 x 3 M's sub-matrix
2752           (1, -2*x, x**2 - y*z - y, x*y - z**2)
2753
2754           For more information on the implemented algorithm refer to:
2755
2756           [1] S.J. Berkowitz, On computing the determinant in small
2757               parallel time using a small number of processors, ACM,
2758               Information Processing Letters 18, 1984, pp. 147-150
2759
2760           [2] M. Keber, Division-Free computation of sub-resultants
2761               using Bezout matrices, Tech. Report MPI-I-2006-1-006,
2762               Saarbrücken, 2006
2763
2764        See Also
2765        ========
2766
2767        berkowitz_det
2768        berkowitz_minors
2769        berkowitz_charpoly
2770        berkowitz_eigenvals
2771
2772        """
2773        from . import zeros
2774
2775        if not self.is_square:
2776            raise NonSquareMatrixError()
2777
2778        A, N = self, self.rows
2779        transforms = [0]*(N - 1)
2780
2781        for n in range(N, 1, -1):
2782            T, k = zeros(n + 1, n), n - 1
2783
2784            R, C = -A[k, :k], A[:k, k]
2785            A, a = A[:k, :k], -A[k, k]
2786
2787            items = [C]
2788
2789            for i in range(n - 2):
2790                items.append(A*items[i])
2791
2792            for i, B in enumerate(items):
2793                items[i] = (R*B)[0, 0]
2794
2795            items = [Integer(1), a] + items
2796
2797            for i in range(n):
2798                T[i:, i] = items[:n - i + 1]
2799
2800            transforms[k - 1] = T
2801
2802        polys = [self._new([Integer(1), -A[0, 0]])]
2803
2804        for i, T in enumerate(transforms):
2805            polys.append(T*polys[i])
2806
2807        return tuple(map(tuple, polys))
2808
2809    def berkowitz_det(self):
2810        """Computes determinant using Berkowitz method.
2811
2812        See Also
2813        ========
2814
2815        det
2816        berkowitz
2817
2818        """
2819        if not self.is_square:
2820            raise NonSquareMatrixError()
2821        if not self:
2822            return Integer(1)
2823        poly = self.berkowitz()[-1]
2824        sign = (-1)**(len(poly) - 1)
2825        return sign*poly[-1]
2826
2827    def berkowitz_minors(self):
2828        """Computes principal minors using Berkowitz method.
2829
2830        See Also
2831        ========
2832
2833        berkowitz
2834
2835        """
2836        sign, minors = Integer(-1), []
2837
2838        for poly in self.berkowitz():
2839            minors.append(sign*poly[-1])
2840            sign = -sign
2841
2842        return tuple(minors)
2843
2844    def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
2845        """Computes characteristic polynomial minors using Berkowitz method.
2846
2847        A PurePoly is returned so using different variables for ``x`` does
2848        not affect the comparison or the polynomials:
2849
2850        Examples
2851        ========
2852
2853        >>> A = Matrix([[1, 3], [2, 0]])
2854        >>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
2855        True
2856
2857        Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by
2858        default (which looks good when pretty-printed in unicode):
2859
2860        >>> A.berkowitz_charpoly().as_expr()
2861        _lambda**2 - _lambda - 6
2862
2863        Be sure your provided ``x`` doesn't clash with existing symbols:
2864
2865        >>> A = Matrix([[1, 2], [x, 0]])
2866        >>> A.charpoly().as_expr()
2867        -2*x + _lambda**2 - _lambda
2868        >>> A.charpoly(x).as_expr()
2869        Traceback (most recent call last):
2870        ...
2871        GeneratorsError: polynomial ring and it's ground domain share generators
2872        >>> A.charpoly(y).as_expr()
2873        -2*x + y**2 - y
2874
2875        See Also
2876        ========
2877
2878        berkowitz
2879
2880        """
2881        return PurePoly(list(map(simplify, reversed(self.berkowitz()[-1]))), x)
2882
2883    charpoly = berkowitz_charpoly
2884
2885    def berkowitz_eigenvals(self, **flags):
2886        """Computes eigenvalues of a Matrix using Berkowitz method.
2887
2888        See Also
2889        ========
2890
2891        berkowitz
2892
2893        """
2894        return roots(self.berkowitz_charpoly(Dummy('x')), **flags)
2895
2896    def eigenvals(self, **flags):
2897        """Return eigen values using the berkowitz_eigenvals routine.
2898
2899        Since the roots routine doesn't always work well with Floats,
2900        they will be replaced with Rationals before calling that
2901        routine. If this is not desired, set flag ``rational`` to False.
2902
2903        """
2904        # roots doesn't like Floats, so replace them with Rationals
2905        # unless the nsimplify flag indicates that this has already
2906        # been done, e.g. in eigenvects
2907        mat = self
2908        if not mat:
2909            return {}
2910        if flags.pop('rational', True):
2911            if any(v.has(Float) for v in mat):
2912                mat = mat._new(mat.rows, mat.cols,
2913                               [nsimplify(v, rational=True) for v in mat])
2914
2915        flags.pop('simplify', None)  # pop unsupported flag
2916        return mat.berkowitz_eigenvals(**flags)
2917
2918    def eigenvects(self, **flags):
2919        """Return list of triples (eigenval, multiplicity, basis).
2920
2921        The flag ``simplify`` has two effects:
2922            1) if bool(simplify) is True, as_content_primitive()
2923            will be used to tidy up normalization artifacts;
2924            2) if nullspace needs simplification to compute the
2925            basis, the simplify flag will be passed on to the
2926            nullspace routine which will interpret it there.
2927
2928        If the matrix contains any Floats, they will be changed to Rationals
2929        for computation purposes, but the answers will be returned after being
2930        evaluated with evalf. If it is desired to removed small imaginary
2931        portions during the evalf step, pass a value for the ``chop`` flag.
2932
2933        """
2934        from . import eye
2935
2936        primitive = bool(flags.get('simplify', False))
2937        chop = flags.pop('chop', False)
2938
2939        flags.pop('multiple', None)  # remove this if it's there
2940
2941        # roots doesn't like Floats, so replace them with Rationals
2942        float = False
2943        mat = self
2944        if any(v.has(Float) for v in self):
2945            float = True
2946            mat = mat._new(mat.rows, mat.cols, [nsimplify(
2947                v, rational=True) for v in mat])
2948            flags['rational'] = False  # to tell eigenvals not to do this
2949
2950        out, vlist = [], mat.eigenvals(**flags)
2951        vlist = list(vlist.items())
2952        vlist.sort(key=default_sort_key)
2953        flags.pop('rational', None)
2954
2955        for r, k in vlist:
2956            tmp = mat.as_mutable() - eye(mat.rows)*r
2957            basis = tmp.nullspace()
2958            # whether tmp.is_symbolic() is True or False, it is possible that
2959            # the basis will come back as [] in which case simplification is
2960            # necessary.
2961
2962            if primitive:
2963                # the relationship A*e = lambda*e will still hold if we change the
2964                # eigenvector; so if simplify is True we tidy up any normalization
2965                # artifacts with as_content_primtive (default) and remove any pure Integer
2966                # denominators.
2967                l = 1
2968                for i, b in enumerate(basis[0]):
2969                    c, p = signsimp(b).as_content_primitive()
2970                    if c != 1:
2971                        b = c*p
2972                        l = math.lcm(l, c.denominator)
2973                    basis[0][i] = b
2974                if l != 1:
2975                    basis[0] *= l
2976            if float:
2977                out.append((r.evalf(chop=chop), k, [
2978                           mat._new(b).evalf(chop=chop) for b in basis]))
2979            else:
2980                out.append((r, k, [mat._new(b) for b in basis]))
2981        return out
2982
2983    def singular_values(self):
2984        """Compute the singular values of a Matrix
2985
2986        Examples
2987        ========
2988
2989        >>> x = Symbol('x', real=True)
2990        >>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
2991        >>> A.singular_values()
2992        [sqrt(x**2 + 1), 1, 0]
2993
2994        See Also
2995        ========
2996
2997        condition_number
2998
2999        """
3000        mat = self.as_mutable()
3001        # Compute eigenvalues of A.H A
3002        valmultpairs = (mat.H*mat).eigenvals()
3003
3004        # Expands result from eigenvals into a simple list
3005        vals = []
3006        for k, v in valmultpairs.items():
3007            vals += [sqrt(k)]*v  # dangerous! same k in several spots!
3008        # sort them in descending order
3009        vals.sort(reverse=True, key=default_sort_key)
3010
3011        return vals
3012
3013    def condition_number(self):
3014        """Returns the condition number of a matrix.
3015
3016        This is the maximum singular value divided by the minimum singular value
3017
3018        Examples
3019        ========
3020
3021        >>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, Rational(1, 10)]])
3022        >>> A.condition_number()
3023        100
3024
3025        See Also
3026        ========
3027
3028        singular_values
3029
3030        """
3031        if not self:
3032            return Integer(0)
3033        singularvalues = self.singular_values()
3034        return Max(*singularvalues) / Min(*singularvalues)
3035
3036    def integrate(self, *args):
3037        """Integrate each element of the matrix.
3038
3039        Examples
3040        ========
3041
3042        >>> M = Matrix([[x, y], [1, 0]])
3043        >>> M.integrate(x)
3044        Matrix([
3045        [x**2/2, x*y],
3046        [     x,   0]])
3047        >>> M.integrate((x, 0, 2))
3048        Matrix([
3049        [2, 2*y],
3050        [2,   0]])
3051
3052        See Also
3053        ========
3054
3055        limit
3056        diff
3057
3058        """
3059        return self._new(self.rows, self.cols,
3060                         lambda i, j: self[i, j].integrate(*args))
3061
3062    def limit(self, *args):
3063        """Calculate the limit of each element in the matrix.
3064
3065        Examples
3066        ========
3067
3068        >>> M = Matrix([[x, y], [1, 0]])
3069        >>> M.limit(x, 2)
3070        Matrix([
3071        [2, y],
3072        [1, 0]])
3073
3074        See Also
3075        ========
3076
3077        integrate
3078        diff
3079
3080        """
3081        return self._new(self.rows, self.cols,
3082                         lambda i, j: self[i, j].limit(*args))
3083
3084    def diff(self, *args):
3085        """Calculate the derivative of each element in the matrix.
3086
3087        Examples
3088        ========
3089
3090        >>> M = Matrix([[x, y], [1, 0]])
3091        >>> M.diff(x)
3092        Matrix([
3093        [1, 0],
3094        [0, 0]])
3095
3096        See Also
3097        ========
3098
3099        integrate
3100        limit
3101
3102        """
3103        return self._new(self.rows, self.cols,
3104                         lambda i, j: self[i, j].diff(*args))
3105
3106    def vec(self):
3107        """Return the Matrix converted into a one column matrix by stacking columns
3108
3109        Examples
3110        ========
3111
3112        >>> m = Matrix([[1, 3], [2, 4]])
3113        >>> m
3114        Matrix([
3115        [1, 3],
3116        [2, 4]])
3117        >>> m.vec()
3118        Matrix([
3119        [1],
3120        [2],
3121        [3],
3122        [4]])
3123
3124        See Also
3125        ========
3126
3127        vech
3128
3129        """
3130        return self.T.reshape(len(self), 1)
3131
3132    def vech(self, diagonal=True, check_symmetry=True):
3133        """Return the unique elements of a symmetric Matrix as a one column matrix
3134        by stacking the elements in the lower triangle.
3135
3136        Arguments:
3137        diagonal -- include the diagonal cells of self or not
3138        check_symmetry -- checks symmetry of self but not completely reliably
3139
3140        Examples
3141        ========
3142
3143        >>> m = Matrix([[1, 2], [2, 3]])
3144        >>> m
3145        Matrix([
3146        [1, 2],
3147        [2, 3]])
3148        >>> m.vech()
3149        Matrix([
3150        [1],
3151        [2],
3152        [3]])
3153        >>> m.vech(diagonal=False)
3154        Matrix([[2]])
3155
3156        See Also
3157        ========
3158
3159        vec
3160
3161        """
3162        from . import zeros
3163
3164        c = self.cols
3165        if c != self.rows:
3166            raise ShapeError('Matrix must be square')
3167        if check_symmetry:
3168            self.simplify()
3169            if self != self.transpose():
3170                raise ValueError('Matrix appears to be asymmetric; consider check_symmetry=False')
3171        count = 0
3172        if diagonal:
3173            v = zeros(c*(c + 1) // 2, 1)
3174            for j in range(c):
3175                for i in range(j, c):
3176                    v[count] = self[i, j]
3177                    count += 1
3178        else:
3179            v = zeros(c*(c - 1) // 2, 1)
3180            for j in range(c):
3181                for i in range(j + 1, c):
3182                    v[count] = self[i, j]
3183                    count += 1
3184        return v
3185
3186    def get_diag_blocks(self):
3187        """Obtains the square sub-matrices on the main diagonal of a square matrix.
3188
3189        Useful for inverting symbolic matrices or solving systems of
3190        linear equations which may be decoupled by having a block diagonal
3191        structure.
3192
3193        Examples
3194        ========
3195
3196        >>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
3197        >>> a1, a2, a3 = A.get_diag_blocks()
3198        >>> a1
3199        Matrix([
3200        [1,    3],
3201        [y, z**2]])
3202        >>> a2
3203        Matrix([[x]])
3204        >>> a3
3205        Matrix([[0]])
3206
3207        """
3208        sub_blocks = []
3209
3210        def recurse_sub_blocks(M):
3211            i = 1
3212            while i <= M.rows:
3213                if i == 1:
3214                    to_the_right = M[0, i:]
3215                    to_the_bottom = M[i:, 0]
3216                else:
3217                    to_the_right = M[:i, i:]
3218                    to_the_bottom = M[i:, :i]
3219                if not any(to_the_right) and not any(to_the_bottom):
3220                    sub_blocks.append(M[:i, :i])
3221                    if M.shape != M[:i, :i].shape:
3222                        return recurse_sub_blocks(M[i:, i:])
3223                i += 1
3224        recurse_sub_blocks(self)
3225        return sub_blocks
3226
3227    def diagonalize(self, reals_only=False, sort=False, normalize=False):
3228        """
3229        Return (P, D), where D is diagonal and
3230
3231            D = P^-1 * M * P
3232
3233        where M is current matrix.
3234
3235        Examples
3236        ========
3237
3238        >>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
3239        >>> m
3240        Matrix([
3241        [1,  2, 0],
3242        [0,  3, 0],
3243        [2, -4, 2]])
3244        >>> (P, D) = m.diagonalize()
3245        >>> D
3246        Matrix([
3247        [1, 0, 0],
3248        [0, 2, 0],
3249        [0, 0, 3]])
3250        >>> P
3251        Matrix([
3252        [-1, 0, -1],
3253        [ 0, 0, -1],
3254        [ 2, 1,  2]])
3255        >>> P.inv() * m * P
3256        Matrix([
3257        [1, 0, 0],
3258        [0, 2, 0],
3259        [0, 0, 3]])
3260
3261        See Also
3262        ========
3263
3264        is_diagonal
3265        is_diagonalizable
3266
3267        """
3268        from . import diag
3269
3270        if not self.is_square:
3271            raise NonSquareMatrixError()
3272        if not self.is_diagonalizable(reals_only, False):
3273            self._diagonalize_clear_subproducts()
3274            raise MatrixError('Matrix is not diagonalizable')
3275        else:
3276            assert self._eigenvects is not None
3277            if sort:
3278                self._eigenvects.sort(key=default_sort_key)
3279                self._eigenvects.reverse()
3280            diagvals = []
3281            P = self._new(self.rows, 0, [])
3282            for eigenval, multiplicity, vects in self._eigenvects:
3283                for k in range(multiplicity):
3284                    diagvals.append(eigenval)
3285                    vec = vects[k]
3286                    if normalize:
3287                        vec = vec / vec.norm()
3288                    P = P.col_insert(P.cols, vec)
3289            D = diag(*diagvals)
3290            self._diagonalize_clear_subproducts()
3291            return P, D
3292
3293    def is_diagonalizable(self, reals_only=False, clear_subproducts=True):
3294        """Check if matrix is diagonalizable.
3295
3296        If reals_only==True then check that diagonalized matrix consists of the only not complex values.
3297
3298        Some subproducts could be used further in other methods to avoid double calculations,
3299        By default (if clear_subproducts==True) they will be deleted.
3300
3301        Examples
3302        ========
3303
3304        >>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
3305        >>> m
3306        Matrix([
3307        [1,  2, 0],
3308        [0,  3, 0],
3309        [2, -4, 2]])
3310        >>> m.is_diagonalizable()
3311        True
3312        >>> m = Matrix(2, 2, [0, 1, 0, 0])
3313        >>> m
3314        Matrix([
3315        [0, 1],
3316        [0, 0]])
3317        >>> m.is_diagonalizable()
3318        False
3319        >>> m = Matrix(2, 2, [0, 1, -1, 0])
3320        >>> m
3321        Matrix([
3322        [ 0, 1],
3323        [-1, 0]])
3324        >>> m.is_diagonalizable()
3325        True
3326        >>> m.is_diagonalizable(True)
3327        False
3328
3329        See Also
3330        ========
3331
3332        is_diagonal
3333        diagonalize
3334
3335        """
3336        if not self.is_square:
3337            return False
3338        res = False
3339        self._is_symbolic = self.is_symbolic()
3340        self._is_symmetric = self.is_symmetric()
3341        self._eigenvects = None
3342        self._eigenvects = self.eigenvects(simplify=True)
3343        all_iscorrect = True
3344        for eigenval, multiplicity, vects in self._eigenvects:
3345            if len(vects) != multiplicity:
3346                all_iscorrect = False
3347                break
3348            elif reals_only and not eigenval.is_extended_real:
3349                all_iscorrect = False
3350                break
3351        res = all_iscorrect
3352        if clear_subproducts:
3353            self._diagonalize_clear_subproducts()
3354        return res
3355
3356    def _diagonalize_clear_subproducts(self):
3357        del self._is_symbolic
3358        del self._is_symmetric
3359        del self._eigenvects
3360
3361    def jordan_cell(self, eigenval, n):
3362        n = int(n)
3363        from . import MutableMatrix
3364        out = MutableMatrix.zeros(n)
3365        for i in range(n-1):
3366            out[i, i] = eigenval
3367            out[i, i+1] = 1
3368        out[n-1, n-1] = eigenval
3369        return type(self)(out)
3370
3371    def _jordan_block_structure(self):
3372        # To every eigenvalue may belong `i` blocks with size s(i)
3373        # and a chain of generalized eigenvectors
3374        # which will be determined by the following computations:
3375        # for every eigenvalue we will add a dictionary
3376        # containing, for all blocks, the blocksizes and the attached chain vectors
3377        # that will eventually be used to form the transformation P
3378        jordan_block_structures = {}
3379        _eigenvects = self.eigenvects()
3380        ev = self.eigenvals()
3381        if len(ev) == 0:
3382            raise AttributeError('could not compute the eigenvalues')
3383        for eigenval, multiplicity, vects in _eigenvects:
3384            l_jordan_chains = {}
3385            geometrical = len(vects)
3386            assert geometrical != 0
3387            if geometrical == multiplicity:
3388                # The Jordan chains have all length 1 and consist of only one vector
3389                # which is the eigenvector of course
3390                chains = []
3391                for v in vects:
3392                    chain = [v]
3393                    chains.append(chain)
3394                l_jordan_chains[1] = chains
3395                jordan_block_structures[eigenval] = l_jordan_chains
3396            else:
3397                # Up to now we know nothing about the sizes of the blocks of our Jordan matrix.
3398                # Note that knowledge of algebraic and geometrical multiplicity
3399                # will *NOT* be sufficient to determine this structure.
3400                # The blocksize `s` could be defined as the minimal `k` where
3401                # `kernel(self-lI)^k = kernel(self-lI)^(k+1)`
3402                # The extreme case would be that k = (multiplicity-geometrical+1)
3403                # but the blocks could be smaller.
3404
3405                # Consider for instance the following matrix
3406
3407                # [2 1 0 0]
3408                # [0 2 1 0]
3409                # [0 0 2 0]
3410                # [0 0 0 2]
3411
3412                # which coincides with it own Jordan canonical form.
3413                # It has only one eigenvalue l=2 of (algebraic) multiplicity=4.
3414                # It has two eigenvectors, one belonging to the last row (blocksize 1)
3415                # and one being the last part of a jordan chain of length 3 (blocksize of the first block).
3416
3417                # Note again that it is not not possible to obtain this from the algebraic and geometrical
3418                # multiplicity alone. This only gives us an upper limit for the dimension of one of
3419                # the subspaces (blocksize of according jordan block) given by
3420                # max=(multiplicity-geometrical+1) which is reached for our matrix
3421                # but not for
3422
3423                # [2 1 0 0]
3424                # [0 2 0 0]
3425                # [0 0 2 1]
3426                # [0 0 0 2]
3427
3428                # although multiplicity=4 and geometrical=2 are the same for this matrix.
3429
3430                from . import MutableMatrix
3431                I = MutableMatrix.eye(self.rows)
3432                l = eigenval
3433                M = (self-l*I)
3434
3435                # We will store the matrices `(self-l*I)^k` for further computations
3436                # for convenience only we store `Ms[0]=(sefl-lI)^0=I`
3437                # so the index is the same as the power for all further Ms entries
3438                # We also store the vectors that span these kernels (Ns[0] = [])
3439                # and also their dimensions `a_s`
3440                # this is mainly done for debugging since the number of blocks of a given size
3441                # can be computed from the a_s, in order to check our result which is obtained simpler
3442                # by counting the number of Jordan chains for `a` given `s`
3443                # `a_0` is `dim(Kernel(Ms[0]) = dim (Kernel(I)) = 0` since `I` is regular
3444
3445                l_jordan_chains = {}
3446                Ms = [I]
3447                Ns = [[]]
3448                a = [0]
3449                smax = 0
3450                M_new = Ms[-1]*M
3451                Ns_new = M_new.nullspace()
3452                a_new = len(Ns_new)
3453                Ms.append(M_new)
3454                Ns.append(Ns_new)
3455                while a_new > a[-1]:  # as long as the nullspaces increase compute further powers
3456                    a.append(a_new)
3457                    M_new = Ms[-1]*M
3458                    Ns_new = M_new.nullspace()
3459                    a_new = len(Ns_new)
3460                    Ms.append(M_new)
3461                    Ns.append(Ns_new)
3462                    smax += 1
3463
3464                # We now have `Ms[-1]=((self-l*I)**s)=Z=0`.
3465                # We also know the size of the biggest Jordan block
3466                # associated with `l` to be `s`.
3467                # Now let us proceed with the computation of the associate part of the transformation matrix `P`.
3468                # We already know the kernel (=nullspace)  `K_l` of (self-lI) which consists of the
3469                # eigenvectors belonging to eigenvalue `l`.
3470                # The dimension of this space is the geometric multiplicity of eigenvalue `l`.
3471                # For every eigenvector ev out of `K_l`, there exists a subspace that is
3472                # spanned by the Jordan chain of ev. The dimension of this subspace is
3473                # represented by the length `s` of the Jordan block.
3474                # The chain itself is given by `{e_0,..,e_s-1}` where:
3475                # `e_k+1 =(self-lI)e_k (*)`
3476                # and
3477                # `e_s-1=ev`
3478                # So it would be possible to start with the already known `ev` and work backwards until one
3479                # reaches `e_0`. Unfortunately this can not be done by simply solving system (*) since its matrix
3480                # is singular (by definition of the eigenspaces).
3481                # This approach would force us a choose in every step the degree of freedom undetermined
3482                # by (*). This is difficult to implement with computer algebra systems and also quite inefficient.
3483                # We therefore reformulate the problem in terms of nullspaces.
3484                # To do so we start from the other end and choose `e0`'s out of
3485                # `E=Kernel(self-lI)^s / Kernel(self-lI)^(s-1)`
3486                # Note that `Kernel(self-lI)^s = Kernel(Z) = V` (the whole vector space).
3487                # So in the first step `s=smax` this restriction turns out to actually restrict nothing at all
3488                # and the only remaining condition is to choose vectors in `Kernel(self-lI)^(s-1)`.
3489                # Subsequently we compute `e_1=(self-lI)e_0`, `e_2=(self-lI)*e_1` and so on.
3490                # The subspace `E` can have a dimension larger than one.
3491                # That means that we have more than one Jordan block of size `s` for the eigenvalue `l`
3492                # and as many Jordan chains (this is the case in the second example).
3493                # In this case we start as many Jordan chains and have as many blocks of size `s` in the jcf.
3494                # We now have all the Jordan blocks of size `s` but there might be others attached to the same
3495                # eigenvalue that are smaller.
3496                # So we will do the same procedure also for `s-1` and so on until 1 (the lowest possible order
3497                # where the Jordan chain is of length 1 and just represented by the eigenvector).
3498
3499                for s in reversed(range(1, smax+1)):
3500                    S = Ms[s]
3501                    # We want the vectors in `Kernel((self-lI)^s)`,
3502                    # but without those in `Kernel(self-lI)^s-1`
3503                    # so we will add their adjoints as additional equations
3504                    # to the system formed by `S` to get the orthogonal
3505                    # complement.
3506                    # (`S` will no longer be quadratic.)
3507
3508                    exclude_vectors = Ns[s-1]
3509                    for k in range(a[s-1]):
3510                        S = S.col_join((exclude_vectors[k]).adjoint())
3511
3512                    # We also want to exclude the vectors
3513                    # in the chains for the bigger blocks
3514                    # that we have already computed (if there are any).
3515                    # (That is why we start with the biggest s).
3516
3517                    # Since Jordan blocks are not orthogonal in general
3518                    # (in the original space), only those chain vectors
3519                    # that are on level s (index `s-1` in a chain)
3520                    # are added.
3521
3522                    for chain_list in l_jordan_chains.values():
3523                        for chain in chain_list:
3524                            S = S.col_join(chain[s-1].adjoint())
3525
3526                    e0s = S.nullspace()
3527                    # Determine the number of chain leaders
3528                    # for blocks of size `s`.
3529                    n_e0 = len(e0s)
3530                    s_chains = []
3531                    # s_cells=[]
3532                    for i in range(n_e0):
3533                        chain = [e0s[i]]
3534                        for k in range(1, s):
3535                            v = M*chain[k-1]
3536                            chain.append(v)
3537
3538                        # We want the chain leader appear as the last of the block.
3539                        chain.reverse()
3540                        s_chains.append(chain)
3541                    l_jordan_chains[s] = s_chains
3542            jordan_block_structures[eigenval] = l_jordan_chains
3543        return jordan_block_structures
3544
3545    def jordan_form(self, calc_transformation=True):
3546        r"""Return Jordan form J of current matrix.
3547
3548        Also (if calc_transformation=True) the transformation P such that
3549
3550        .. math::
3551            J = P^{-1} \cdot M \cdot P
3552
3553        and the jordan blocks forming J will be calculated.
3554
3555        Examples
3556        ========
3557
3558        >>> m = Matrix([[+6,  5, -2, -3],
3559        ...             [-3, -1,  3,  3],
3560        ...             [+2,  1, -2, -3],
3561        ...             [-1,  1,  5,  5]])
3562        >>> J, P = m.jordan_form()
3563        >>> J
3564        Matrix([
3565        [2, 1, 0, 0],
3566        [0, 2, 0, 0],
3567        [0, 0, 2, 1],
3568        [0, 0, 0, 2]])
3569
3570        See Also
3571        ========
3572
3573        jordan_cells
3574
3575        """
3576        from . import diag
3577        res = self.jordan_cells(calc_transformation=calc_transformation)
3578        if calc_transformation:
3579            Jcells, P = res
3580        else:
3581            Jcells = res
3582        J = diag(*Jcells)
3583        J = type(self)(J)
3584        if calc_transformation:
3585            return J, P
3586        else:
3587            return J
3588
3589    def jordan_cells(self, calc_transformation=True):
3590        r"""Return a list of Jordan cells of current matrix.
3591        This list shape Jordan matrix J.
3592
3593        If calc_transformation=False, then transformation P such that
3594
3595        .. math::
3596            J = P^{-1} \cdot M \cdot P
3597
3598        will not be calculated.
3599
3600        Examples
3601        ========
3602
3603        >>> m = Matrix([[+6,  5, -2, -3],
3604        ...             [-3, -1,  3,  3],
3605        ...             [+2,  1, -2, -3],
3606        ...             [-1,  1,  5,  5]])
3607
3608        >>> Jcells, P = m.jordan_cells()
3609        >>> Jcells[0]
3610        Matrix([
3611        [2, 1],
3612        [0, 2]])
3613        >>> Jcells[1]
3614        Matrix([
3615        [2, 1],
3616        [0, 2]])
3617
3618        See Also
3619        ========
3620
3621        jordan_form
3622
3623        """
3624        from . import MutableMatrix
3625
3626        n = self.rows
3627        Jcells = []
3628        Pcols_new = []
3629        jordan_block_structures = self._jordan_block_structure()
3630
3631        # Order according to default_sort_key, this makes sure the order is the same as in .diagonalize():
3632        for eigenval in (sorted(jordan_block_structures, key=default_sort_key)):
3633            l_jordan_chains = jordan_block_structures[eigenval]
3634            for s in sorted(l_jordan_chains, reverse=True):  # Start with the biggest block
3635                s_chains = l_jordan_chains[s]
3636                block = self.jordan_cell(eigenval, s)
3637                number_of_s_chains = len(s_chains)
3638                for i in range(number_of_s_chains):
3639                    Jcells.append(type(self)(block))
3640                    chain_vectors = s_chains[i]
3641                    lc = len(chain_vectors)
3642                    assert lc == s
3643                    for j in range(lc):
3644                        generalized_eigen_vector = chain_vectors[j]
3645                        Pcols_new.append(generalized_eigen_vector)
3646
3647        if calc_transformation:
3648            P = MutableMatrix.zeros(n)
3649            for j in range(n):
3650                P[:, j] = Pcols_new[j]
3651            return Jcells, type(self)(P)
3652        else:
3653            return Jcells
3654
3655    def has(self, *patterns):
3656        """Test whether any subexpression matches any of the patterns.
3657
3658        Examples
3659        ========
3660
3661        >>> A = Matrix(((1, x), (0.2, 3)))
3662        >>> A.has(x)
3663        True
3664        >>> A.has(y)
3665        False
3666        >>> A.has(Float)
3667        True
3668
3669        """
3670        return any(a.has(*patterns) for a in self._mat)
3671
3672    def dual(self):
3673        """Returns the dual of a matrix, which is:
3674
3675        `(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`
3676
3677        Since the levicivita method is anti_symmetric for any pairwise
3678        exchange of indices, the dual of a symmetric matrix is the zero
3679        matrix. Strictly speaking the dual defined here assumes that the
3680        'matrix' `M` is a contravariant anti_symmetric second rank tensor,
3681        so that the dual is a covariant second rank tensor.
3682
3683        """
3684        from ..functions import LeviCivita
3685        from . import zeros
3686
3687        M, n = self[:, :], self.rows
3688        work = zeros(n)
3689        if self.is_symmetric():
3690            return work
3691
3692        for i in range(1, n):
3693            for j in range(1, n):
3694                acum = 0
3695                for k in range(1, n):
3696                    acum += LeviCivita(i, j, 0, k)*M[0, k]
3697                work[i, j] = acum
3698                work[j, i] = -acum
3699
3700        for l in range(1, n):
3701            acum = 0
3702            for a in range(1, n):
3703                for b in range(1, n):
3704                    acum += LeviCivita(0, l, a, b)*M[a, b]
3705            acum /= 2
3706            work[0, l] = -acum
3707            work[l, 0] = acum
3708
3709        return work
3710
3711    @classmethod
3712    def hstack(cls, *args):
3713        """Return a matrix formed by joining args horizontally (i.e.
3714        by repeated application of row_join).
3715
3716        Examples
3717        ========
3718
3719        >>> Matrix.hstack(eye(2), 2*eye(2))
3720        Matrix([
3721        [1, 0, 2, 0],
3722        [0, 1, 0, 2]])
3723
3724        """
3725        _cls = type(args[0])
3726        return functools.reduce(_cls.row_join, args)
3727
3728    @classmethod
3729    def vstack(cls, *args):
3730        """Return a matrix formed by joining args vertically (i.e.
3731        by repeated application of col_join).
3732
3733        Examples
3734        ========
3735
3736        >>> Matrix.vstack(eye(2), 2*eye(2))
3737        Matrix([
3738        [1, 0],
3739        [0, 1],
3740        [2, 0],
3741        [0, 2]])
3742
3743        """
3744        _cls = type(args[0])
3745        return functools.reduce(_cls.col_join, args)
3746
3747    def row_join(self, rhs):
3748        """Concatenates two matrices along self's last and rhs's first column
3749
3750        Examples
3751        ========
3752
3753        >>> M = zeros(3)
3754        >>> V = ones(3, 1)
3755        >>> M.row_join(V)
3756        Matrix([
3757        [0, 0, 0, 1],
3758        [0, 0, 0, 1],
3759        [0, 0, 0, 1]])
3760
3761        See Also
3762        ========
3763
3764        col_join
3765
3766        """
3767        from . import MutableMatrix
3768
3769        if not self:
3770            return type(self)(rhs)
3771        if self.rows != rhs.rows:
3772            raise ShapeError('`self` and `rhs` must have the same'
3773                             ' number of rows.')
3774
3775        newmat = MutableMatrix.zeros(self.rows, self.cols + rhs.cols)
3776        newmat[:, :self.cols] = self
3777        newmat[:, self.cols:] = rhs
3778        return type(self)(newmat)
3779
3780    def col_join(self, bott):
3781        """Concatenates two matrices along self's last and bott's first row
3782
3783        Examples
3784        ========
3785
3786        >>> M = zeros(3)
3787        >>> V = ones(1, 3)
3788        >>> M.col_join(V)
3789        Matrix([
3790        [0, 0, 0],
3791        [0, 0, 0],
3792        [0, 0, 0],
3793        [1, 1, 1]])
3794
3795        See Also
3796        ========
3797
3798        row_join
3799
3800        """
3801        from . import MutableMatrix
3802
3803        if not self:
3804            return type(self)(bott)
3805        if self.cols != bott.cols:
3806            raise ShapeError('`self` and `bott` must have the same'
3807                             ' number of columns.')
3808
3809        newmat = MutableMatrix.zeros(self.rows + bott.rows, self.cols)
3810        newmat[:self.rows, :] = self
3811        newmat[self.rows:, :] = bott
3812        return type(self)(newmat)
3813
3814    def row_insert(self, pos, mti):
3815        """Insert one or more rows at the given row position.
3816
3817        Examples
3818        ========
3819
3820        >>> M = zeros(3)
3821        >>> V = ones(1, 3)
3822        >>> M.row_insert(1, V)
3823        Matrix([
3824        [0, 0, 0],
3825        [1, 1, 1],
3826        [0, 0, 0],
3827        [0, 0, 0]])
3828
3829        See Also
3830        ========
3831
3832        col_insert
3833
3834        """
3835        if not self:
3836            return type(self)(mti)
3837
3838        if pos == 0:
3839            return mti.col_join(self)
3840        elif pos < 0:
3841            pos = self.rows + pos
3842        if pos < 0:
3843            pos = 0
3844        elif pos > self.rows:
3845            pos = self.rows
3846
3847        if self.cols != mti.cols:
3848            raise ShapeError(
3849                '`self` and `mti` must have the same number of columns.')
3850
3851        newmat = self.zeros(self.rows + mti.rows, self.cols)
3852        i, j = pos, pos + mti.rows
3853        newmat[:i, :] = self[:i, :]
3854        newmat[i: j, :] = mti
3855        newmat[j:, :] = self[i:, :]
3856        return newmat
3857
3858    def col_insert(self, pos, mti):
3859        """Insert one or more columns at the given column position.
3860
3861        Examples
3862        ========
3863
3864        >>> M = zeros(3)
3865        >>> V = ones(3, 1)
3866        >>> M.col_insert(1, V)
3867        Matrix([
3868        [0, 1, 0, 0],
3869        [0, 1, 0, 0],
3870        [0, 1, 0, 0]])
3871
3872        See Also
3873        ========
3874
3875        row_insert
3876
3877        """
3878        from . import MutableMatrix
3879
3880        if not self:
3881            return type(self)(mti)
3882
3883        if pos == 0:
3884            return mti.row_join(self)
3885        elif pos < 0:
3886            pos = self.cols + pos
3887        if pos < 0:
3888            pos = 0
3889        elif pos > self.cols:
3890            pos = self.cols
3891
3892        if self.rows != mti.rows:
3893            raise ShapeError('self and mti must have the same number of rows.')
3894
3895        newmat = MutableMatrix.zeros(self.rows, self.cols + mti.cols)
3896        i, j = pos, pos + mti.cols
3897        newmat[:, :i] = self[:, :i]
3898        newmat[:, i:j] = mti
3899        newmat[:, j:] = self[:, i:]
3900        return type(self)(newmat)
3901
3902    def replace(self, F, G, exact=False):
3903        """Replaces Function F in Matrix entries with Function G.
3904
3905        Examples
3906        ========
3907
3908        >>> F, G = symbols('F, G', cls=Function)
3909        >>> M = Matrix(2, 2, lambda i, j: F(i+j))
3910        >>> M
3911        Matrix([
3912        [F(0), F(1)],
3913        [F(1), F(2)]])
3914        >>> N = M.replace(F, G)
3915        >>> N
3916        Matrix([
3917        [G(0), G(1)],
3918        [G(1), G(2)]])
3919
3920        """
3921        M = self[:, :]
3922
3923        return M.applyfunc(lambda x: x.replace(F, G))
3924
3925    def pinv(self):
3926        """Calculate the Moore-Penrose pseudoinverse of the matrix.
3927
3928        The Moore-Penrose pseudoinverse exists and is unique for any matrix.
3929        If the matrix is invertible, the pseudoinverse is the same as the
3930        inverse.
3931
3932        Examples
3933        ========
3934
3935        >>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
3936        Matrix([
3937        [-17/18,  4/9],
3938        [  -1/9,  1/9],
3939        [ 13/18, -2/9]])
3940
3941        See Also
3942        ========
3943
3944        diofant.matrices.matrices.MatrixBase.inv
3945        pinv_solve
3946
3947        References
3948        ==========
3949
3950        * https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
3951
3952        """
3953        A = self
3954        AH = self.H
3955        # Trivial case: pseudoinverse of all-zero matrix is its transpose.
3956        if A.is_zero:
3957            return AH
3958        if self.rows >= self.cols:
3959            return (AH * A).inv() * AH
3960        else:
3961            return AH * (A * AH).inv()
3962
3963    def pinv_solve(self, B, arbitrary_matrix=None):
3964        """Solve Ax = B using the Moore-Penrose pseudoinverse.
3965
3966        There may be zero, one, or infinite solutions.  If one solution
3967        exists, it will be returned.  If infinite solutions exist, one will
3968        be returned based on the value of arbitrary_matrix.  If no solutions
3969        exist, the least-squares solution is returned.
3970
3971        Parameters
3972        ==========
3973
3974        B : Matrix
3975            The right hand side of the equation to be solved for.  Must have
3976            the same number of rows as matrix A.
3977        arbitrary_matrix : Matrix
3978            If the system is underdetermined (e.g. A has more columns than
3979            rows), infinite solutions are possible, in terms of an arbitrary
3980            matrix.  This parameter may be set to a specific matrix to use
3981            for that purpose; if so, it must be the same shape as x, with as
3982            many rows as matrix A has columns, and as many columns as matrix
3983            B.  If left as None, an appropriate matrix containing dummy
3984            symbols in the form of ``wn_m`` will be used, with n and m being
3985            row and column position of each symbol.
3986
3987        Returns
3988        =======
3989
3990        x : Matrix
3991            The matrix that will satisfy Ax = B.  Will have as many rows as
3992            matrix A has columns, and as many columns as matrix B.
3993
3994        Examples
3995        ========
3996
3997        >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
3998        >>> B = Matrix([7, 8])
3999        >>> A.pinv_solve(B)
4000        Matrix([
4001        [ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
4002        [-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
4003        [ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
4004        >>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
4005        Matrix([
4006        [-55/18],
4007        [   1/9],
4008        [ 59/18]])
4009
4010        See Also
4011        ========
4012
4013        lower_triangular_solve
4014        upper_triangular_solve
4015        cholesky_solve
4016        diagonal_solve
4017        LDLsolve
4018        LUsolve
4019        QRsolve
4020        pinv
4021
4022        Notes
4023        =====
4024
4025        This may return either exact solutions or least squares solutions.
4026        To determine which, check ``A * A.pinv() * B == B``.  It will be
4027        True if exact solutions exist, and False if only a least-squares
4028        solution exists.  Be aware that the left hand side of that equation
4029        may need to be simplified to correctly compare to the right hand
4030        side.
4031
4032        References
4033        ==========
4034
4035        * https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
4036
4037        """
4038        from . import eye
4039        A = self
4040        A_pinv = self.pinv()
4041        if arbitrary_matrix is None:
4042            rows, cols = A.cols, B.cols
4043            w = symbols(f'w:{rows}_:{cols}', cls=Dummy)
4044            arbitrary_matrix = self.__class__(cols, rows, w).T
4045        return A_pinv * B + (eye(A.cols) - A_pinv*A) * arbitrary_matrix
4046
4047
4048def classof(A, B):
4049    """
4050    Get the type of the result when combining matrices of different types.
4051
4052    Currently the strategy is that immutability is contagious.
4053
4054    Examples
4055    ========
4056
4057    >>> M = Matrix([[1, 2], [3, 4]])  # a Mutable Matrix
4058    >>> IM = ImmutableMatrix([[1, 2], [3, 4]])
4059    >>> classof(M, IM)
4060    <class 'diofant.matrices.immutable.ImmutableMatrix'>
4061
4062    """
4063    try:
4064        if A._class_priority > B._class_priority:
4065            return A.__class__
4066        else:
4067            return B.__class__
4068    except AttributeError:
4069        pass
4070    raise TypeError(f'Incompatible classes {A.__class__}, {B.__class__}')
4071
4072
4073def a2idx(j, n=None):
4074    """Return integer after making positive and validating against n."""
4075    if type(j) is not int:
4076        try:
4077            j = j.__index__()
4078        except AttributeError:
4079            raise IndexError(f'Invalid index a[{j!r}]')
4080    if n is not None:
4081        if j < 0:
4082            j += n
4083        if not (j >= 0 and j < n):
4084            raise IndexError(f'Index out of range: a[{j}]')
4085    return int(j)
4086
4087
4088def mgamma(mu, lower=False):
4089    r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard
4090    (Dirac) representation.
4091
4092    If you want `\gamma_\mu`, use ``gamma(mu, True)``.
4093
4094    We use a convention:
4095
4096    `\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3`
4097
4098    `\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5`
4099
4100    References
4101    ==========
4102
4103    * https://en.wikipedia.org/wiki/Gamma_matrices
4104
4105    """
4106    from . import Matrix
4107    if mu not in [0, 1, 2, 3, 5]:
4108        raise IndexError('Invalid Dirac index')
4109    if mu == 0:
4110        mat = (
4111            (1, 0, 0, 0),
4112            (0, 1, 0, 0),
4113            (0, 0, -1, 0),
4114            (0, 0, 0, -1)
4115        )
4116    elif mu == 1:
4117        mat = (
4118            (0, 0, 0, 1),
4119            (0, 0, 1, 0),
4120            (0, -1, 0, 0),
4121            (-1, 0, 0, 0)
4122        )
4123    elif mu == 2:
4124        mat = (
4125            (0, 0, 0, -I),
4126            (0, 0, I, 0),
4127            (0, I, 0, 0),
4128            (-I, 0, 0, 0)
4129        )
4130    elif mu == 3:
4131        mat = (
4132            (0, 0, 1, 0),
4133            (0, 0, 0, -1),
4134            (-1, 0, 0, 0),
4135            (0, 1, 0, 0)
4136        )
4137    else:  # mu == 5
4138        mat = (
4139            (0, 0, 1, 0),
4140            (0, 0, 0, 1),
4141            (1, 0, 0, 0),
4142            (0, 1, 0, 0)
4143        )
4144    m = Matrix(mat)
4145    if lower:
4146        if mu in [1, 2, 3, 5]:
4147            m = -m
4148    return m
4149