1 /*
2 * MATRIX UTILITY MODULE
3 *
4 * Author: Advising professor:
5 * Kenneth S. Kundert Alberto Sangiovanni-Vincentelli
6 * UC Berkeley
7 *
8 * This file contains various optional utility routines.
9 *
10 * >>> User accessible functions contained in this file:
11 * spMNA_Preorder
12 * spScale
13 * spMultiply
14 * spMultTransposed
15 * spDeterminant
16 * spStripFills
17 * spDeleteRowAndCol
18 * spPseudoCondition
19 * spCondition
20 * spNorm
21 * spLargestElement
22 * spRoundoff
23 * spErrorMessage
24 *
25 * >>> Other functions contained in this file:
26 * CountTwins
27 * SwapCols
28 * ScaleComplexMatrix
29 * ComplexMatrixMultiply
30 * ComplexCondition
31 */
32
33
34 /*
35 * Revision and copyright information.
36 *
37 * Copyright (c) 1985,86,87,88,89,90
38 * by Kenneth S. Kundert and the University of California.
39 *
40 * Permission to use, copy, modify, and distribute this software and
41 * its documentation for any purpose and without fee is hereby granted,
42 * provided that the copyright notices appear in all copies and
43 * supporting documentation and that the authors and the University of
44 * California are properly credited. The authors and the University of
45 * California make no representations as to the suitability of this
46 * software for any purpose. It is provided `as is', without express
47 * or implied warranty.
48 */
49
50 /*
51 * IMPORTS
52 *
53 * >>> Import descriptions:
54 * spConfig.h
55 * Macros that customize the sparse matrix routines.
56 * spMatrix.h
57 * Macros and declarations to be imported by the user.
58 * spDefs.h
59 * Matrix type and macro definitions for the sparse matrix routines.
60 */
61 #include <assert.h>
62 #include <stdlib.h>
63
64 #define spINSIDE_SPARSE
65 #include "spconfig.h"
66 #include "ngspice/spmatrix.h"
67 #include "spdefs.h"
68
69
70
71
72
73 /* Function declarations */
74
75 static int CountTwins( MatrixPtr, int, ElementPtr*, ElementPtr* );
76 static void SwapCols( MatrixPtr, ElementPtr, ElementPtr );
77 static void ComplexMatrixMultiply( MatrixPtr, RealVector, RealVector,
78 RealVector, RealVector );
79 static void ComplexTransposedMatrixMultiply( MatrixPtr, RealVector, RealVector,
80 RealVector, RealVector);
81
82
83
84
85
86 #if MODIFIED_NODAL
87 /*
88 * PREORDER MODIFIED NODE ADMITTANCE MATRIX TO REMOVE ZEROS FROM DIAGONAL
89 *
90 * This routine massages modified node admittance matrices to remove
91 * zeros from the diagonal. It takes advantage of the fact that the
92 * row and column associated with a zero diagonal usually have
93 * structural ones placed symmetricly. This routine should be used
94 * only on modified node admittance matrices and should be executed
95 * after the matrix has been built but before the factorization
96 * begins. It should be executed for the initial factorization only
97 * and should be executed before the rows have been linked. Thus it
98 * should be run before using spScale(), spMultiply(),
99 * spDeleteRowAndCol(), or spNorm().
100 *
101 * This routine exploits the fact that the structural ones are placed
102 * in the matrix in symmetric twins. For example, the stamps for
103 * a grounded and a floating voltage source are
104 * grounded: floating:
105 * [ x x 1 ] [ x x 1 ]
106 * [ x x ] [ x x -1 ]
107 * [ 1 ] [ 1 -1 ]
108 * Notice for the grounded source, there is one set of twins, and for
109 * the floating, there are two sets. We remove the zero from the diagonal
110 * by swapping the rows associated with a set of twins. For example:
111 * grounded: floating 1: floating 2:
112 * [ 1 ] [ 1 -1 ] [ x x 1 ]
113 * [ x x ] [ x x -1 ] [ 1 -1 ]
114 * [ x x 1 ] [ x x 1 ] [ x x -1 ]
115 *
116 * It is important to deal with any zero diagonals that only have one
117 * set of twins before dealing with those that have more than one because
118 * swapping rows destroys the symmetry of any twins in the rows being
119 * swapped, which may limit future moves. Consider
120 * [ x x 1 ]
121 * [ x x -1 1 ]
122 * [ 1 -1 ]
123 * [ 1 ]
124 * There is one set of twins for diagonal 4 and two for diagonal 3.
125 * Dealing with diagonal 4 first requires swapping rows 2 and 4.
126 * [ x x 1 ]
127 * [ 1 ]
128 * [ 1 -1 ]
129 * [ x x -1 1 ]
130 * We can now deal with diagonal 3 by swapping rows 1 and 3.
131 * [ 1 -1 ]
132 * [ 1 ]
133 * [ x x 1 ]
134 * [ x x -1 1 ]
135 * And we are done, there are no zeros left on the diagonal. However, if
136 * we originally dealt with diagonal 3 first, we could swap rows 2 and 3
137 * [ x x 1 ]
138 * [ 1 -1 ]
139 * [ x x -1 1 ]
140 * [ 1 ]
141 * Diagonal 4 no longer has a symmetric twin and we cannot continue.
142 *
143 * So we always take care of lone twins first. When none remain, we
144 * choose arbitrarily a set of twins for a diagonal with more than one set
145 * and swap the rows corresponding to that twin. We then deal with any
146 * lone twins that were created and repeat the procedure until no
147 * zero diagonals with symmetric twins remain.
148 *
149 * In this particular implementation, columns are swapped rather than rows.
150 * The algorithm used in this function was developed by Ken Kundert and
151 * Tom Quarles.
152 *
153 * >>> Arguments:
154 * Matrix <input> (char *)
155 * Pointer to the matrix to be preordered.
156 *
157 * >>> Local variables;
158 * J (int)
159 * Column with zero diagonal being currently considered.
160 * pTwin1 (ElementPtr)
161 * Pointer to the twin found in the column belonging to the zero diagonal.
162 * pTwin2 (ElementPtr)
163 * Pointer to the twin found in the row belonging to the zero diagonal.
165 * AnotherPassNeeded (int)
166 * Flag indicating that at least one zero diagonal with symmetric twins
167 * remain.
168 * StartAt (int)
169 * Column number of first zero diagonal with symmetric twins.
170 * Swapped (int)
171 * Flag indicating that columns were swapped on this pass.
172 * Twins (int)
173 * Number of symmetric twins corresponding to current zero diagonal.
174 */
175
176 void
spMNA_Preorder(MatrixPtr Matrix)177 spMNA_Preorder(MatrixPtr Matrix)
178 {
179 int J, Size;
180 ElementPtr pTwin1, pTwin2;
181 int Twins, StartAt = 1;
182 int Swapped, AnotherPassNeeded;
183
184 /* Begin `spMNA_Preorder'. */
185 assert( IS_VALID(Matrix) && !Matrix->Factored );
186
187 if (Matrix->RowsLinked) return;
188 Size = Matrix->Size;
189 Matrix->Reordered = YES;
190
191 do
192 {
193 AnotherPassNeeded = Swapped = NO;
194
195 /* Search for zero diagonals with lone twins. */
196 for (J = StartAt; J <= Size; J++)
197 {
198 if (Matrix->Diag[J] == NULL)
199 {
200 Twins = CountTwins( Matrix, J, &pTwin1, &pTwin2 );
201 if (Twins == 1)
202 {
203 /* Lone twins found, swap rows. */
204 SwapCols( Matrix, pTwin1, pTwin2 );
205 Swapped = YES;
206 }
207 else if ((Twins > 1) && !AnotherPassNeeded)
208 {
209 AnotherPassNeeded = YES;
210 StartAt = J;
211 }
212 }
213 }
214
215 /* All lone twins are gone, look for zero diagonals with multiple twins. */
216 if (AnotherPassNeeded)
217 {
218 for (J = StartAt; !Swapped && (J <= Size); J++)
219 {
220 if (Matrix->Diag[J] == NULL)
221 {
222 Twins = CountTwins( Matrix, J, &pTwin1, &pTwin2 );
223 SwapCols( Matrix, pTwin1, pTwin2 );
224 Swapped = YES;
225 }
226 }
227 }
228 } while (AnotherPassNeeded);
229 return;
230 }
231
232
233
234
235 /*
236 * COUNT TWINS
237 *
238 * This function counts the number of symmetric twins associated with
239 * a zero diagonal and returns one set of twins if any exist. The
240 * count is terminated early at two.
241 */
242
243 static int
CountTwins(MatrixPtr Matrix,int Col,ElementPtr * ppTwin1,ElementPtr * ppTwin2)244 CountTwins( MatrixPtr Matrix, int Col, ElementPtr *ppTwin1, ElementPtr *ppTwin2 )
245 {
246 int Row, Twins = 0;
247 ElementPtr pTwin1, pTwin2;
248
249 /* Begin `CountTwins'. */
250
251 pTwin1 = Matrix->FirstInCol[Col];
252 while (pTwin1 != NULL)
253 {
254 if (ABS(pTwin1->Real) == 1.0)
255 {
256 Row = pTwin1->Row;
257 pTwin2 = Matrix->FirstInCol[Row];
258 while ((pTwin2 != NULL) && (pTwin2->Row != Col))
259 pTwin2 = pTwin2->NextInCol;
260 if ((pTwin2 != NULL) && (ABS(pTwin2->Real) == 1.0))
261 {
262 /* Found symmetric twins. */
263 if (++Twins >= 2) return Twins;
264 (*ppTwin1 = pTwin1)->Col = Col;
265 (*ppTwin2 = pTwin2)->Col = Row;
266 }
267 }
268 pTwin1 = pTwin1->NextInCol;
269 }
270 return Twins;
271 }
272
273
274
275
276 /*
277 * SWAP COLUMNS
278 *
279 * This function swaps two columns and is applicable before the rows are
280 * linked.
281 */
282
283 static void
SwapCols(MatrixPtr Matrix,ElementPtr pTwin1,ElementPtr pTwin2)284 SwapCols( MatrixPtr Matrix, ElementPtr pTwin1, ElementPtr pTwin2 )
285 {
286 int Col1 = pTwin1->Col, Col2 = pTwin2->Col;
287
288 /* Begin `SwapCols'. */
289
290 SWAP (ElementPtr, Matrix->FirstInCol[Col1], Matrix->FirstInCol[Col2]);
291 SWAP (int, Matrix->IntToExtColMap[Col1], Matrix->IntToExtColMap[Col2]);
292 #if TRANSLATE
293 Matrix->ExtToIntColMap[Matrix->IntToExtColMap[Col2]]=Col2;
294 Matrix->ExtToIntColMap[Matrix->IntToExtColMap[Col1]]=Col1;
295 #endif
296
297 Matrix->Diag[Col1] = pTwin2;
298 Matrix->Diag[Col2] = pTwin1;
299 Matrix->NumberOfInterchangesIsOdd = !Matrix->NumberOfInterchangesIsOdd;
300 return;
301 }
302 #endif /* MODIFIED_NODAL */
303
304
305
306
307
308
309
310
311
312 #if SCALING
313 /*
314 * SCALE MATRIX
315 *
316 * This function scales the matrix to enhance the possibility of
317 * finding a good pivoting order. Note that scaling enhances accuracy
318 * of the solution only if it affects the pivoting order, so it makes
319 * no sense to scale the matrix before spFactor(). If scaling is
320 * desired it should be done before spOrderAndFactor(). There
321 * are several things to take into account when choosing the scale
322 * factors. First, the scale factors are directly multiplied against
323 * the elements in the matrix. To prevent roundoff, each scale factor
324 * should be equal to an integer power of the number base of the
325 * machine. Since most machines operate in base two, scale factors
326 * should be a power of two. Second, the matrix should be scaled such
327 * that the matrix of element uncertainties is equilibrated. Third,
328 * this function multiplies the scale factors by the elements, so if
329 * one row tends to have uncertainties 1000 times smaller than the
330 * other rows, then its scale factor should be 1024, not 1/1024.
331 * Fourth, to save time, this function does not scale rows or columns
332 * if their scale factors are equal to one. Thus, the scale factors
333 * should be normalized to the most common scale factor. Rows and
334 * columns should be normalized separately. For example, if the size
335 * of the matrix is 100 and 10 rows tend to have uncertainties near
336 * 1e-6 and the remaining 90 have uncertainties near 1e-12, then the
337 * scale factor for the 10 should be 1/1,048,576 and the scale factors
338 * for the remaining 90 should be 1. Fifth, since this routine
339 * directly operates on the matrix, it is necessary to apply the scale
340 * factors to the RHS and Solution vectors. It may be easier to
341 * simply use spOrderAndFactor() on a scaled matrix to choose the
342 * pivoting order, and then throw away the matrix. Subsequent
343 * factorizations, performed with spFactor(), will not need to have
344 * the RHS and Solution vectors descaled. Lastly, this function
345 * should not be executed before the function spMNA_Preorder.
346 *
347 * >>> Arguments:
348 * Matrix <input> (char *)
349 * Pointer to the matrix to be scaled.
350 * SolutionScaleFactors <input> (RealVector)
351 * The array of Solution scale factors. These factors scale the columns.
352 * All scale factors are real valued.
353 * RHS_ScaleFactors <input> (RealVector)
354 * The array of RHS scale factors. These factors scale the rows.
355 * All scale factors are real valued.
356 *
357 * >>> Local variables:
358 * lSize (int)
359 * Local version of the size of the matrix.
360 * pElement (ElementPtr)
361 * Pointer to an element in the matrix.
362 * pExtOrder (int *)
363 * Pointer into either IntToExtRowMap or IntToExtColMap vector. Used to
364 * compensate for any row or column swaps that have been performed.
365 * ScaleFactor (RealNumber)
366 * The scale factor being used on the current row or column.
367 */
368
369 void
spScale(MatrixPtr Matrix,RealVector RHS_ScaleFactors,RealVector SolutionScaleFactors)370 spScale(MatrixPtr Matrix, RealVector RHS_ScaleFactors, RealVector SolutionScaleFactors)
371 {
372 ElementPtr pElement;
373 int I, lSize, *pExtOrder;
374 RealNumber ScaleFactor;
375 void ScaleComplexMatrix();
376
377 /* Begin `spScale'. */
378 assert( IS_VALID(Matrix) && !Matrix->Factored );
379 if (!Matrix->RowsLinked) spcLinkRows( Matrix );
380
381 if (Matrix->Complex)
382 {
383 ScaleComplexMatrix( Matrix, RHS_ScaleFactors, SolutionScaleFactors );
384 return;
385 }
386
387 lSize = Matrix->Size;
388
389 /* Scale Rows */
390 pExtOrder = &Matrix->IntToExtRowMap[1];
391 for (I = 1; I <= lSize; I++)
392 {
393 if ((ScaleFactor = RHS_ScaleFactors[*(pExtOrder++)]) != 1.0)
394 {
395 pElement = Matrix->FirstInRow[I];
396
397 while (pElement != NULL)
398 {
399 pElement->Real *= ScaleFactor;
400 pElement = pElement->NextInRow;
401 }
402 }
403 }
404
405 /* Scale Columns */
406 pExtOrder = &Matrix->IntToExtColMap[1];
407 for (I = 1; I <= lSize; I++)
408 {
409 if ((ScaleFactor = SolutionScaleFactors[*(pExtOrder++)]) != 1.0)
410 {
411 pElement = Matrix->FirstInCol[I];
412
413 while (pElement != NULL)
414 {
415 pElement->Real *= ScaleFactor;
416 pElement = pElement->NextInCol;
417 }
418 }
419 }
420 return;
421
422 }
423 #endif /* SCALING */
424
425
426
427
428
429
430
431
432
433 #if SCALING
434 /*
435 * SCALE COMPLEX MATRIX
436 *
437 * This function scales the matrix to enhance the possibility of
438 * finding a good pivoting order. Note that scaling enhances accuracy
439 * of the solution only if it affects the pivoting order, so it makes
440 * no sense to scale the matrix before spFactor(). If scaling is
441 * desired it should be done before spOrderAndFactor(). There
442 * are several things to take into account when choosing the scale
443 * factors. First, the scale factors are directly multiplied against
444 * the elements in the matrix. To prevent roundoff, each scale factor
445 * should be equal to an integer power of the number base of the
446 * machine. Since most machines operate in base two, scale factors
447 * should be a power of two. Second, the matrix should be scaled such
448 * that the matrix of element uncertainties is equilibrated. Third,
449 * this function multiplies the scale factors by the elements, so if
450 * one row tends to have uncertainties 1000 times smaller than the
451 * other rows, then its scale factor should be 1024, not 1/1024.
452 * Fourth, to save time, this function does not scale rows or columns
453 * if their scale factors are equal to one. Thus, the scale factors
454 * should be normalized to the most common scale factor. Rows and
455 * columns should be normalized separately. For example, if the size
456 * of the matrix is 100 and 10 rows tend to have uncertainties near
457 * 1e-6 and the remaining 90 have uncertainties near 1e-12, then the
458 * scale factor for the 10 should be 1/1,048,576 and the scale factors
459 * for the remaining 90 should be 1. Fifth, since this routine
460 * directly operates on the matrix, it is necessary to apply the scale
461 * factors to the RHS and Solution vectors. It may be easier to
462 * simply use spOrderAndFactor() on a scaled matrix to choose the
463 * pivoting order, and then throw away the matrix. Subsequent
464 * factorizations, performed with spFactor(), will not need to have
465 * the RHS and Solution vectors descaled. Lastly, this function
466 * should not be executed before the function spMNA_Preorder.
467 *
468 * >>> Arguments:
469 * Matrix <input> (char *)
470 * Pointer to the matrix to be scaled.
471 * SolutionScaleFactors <input> (RealVector)
472 * The array of Solution scale factors. These factors scale the columns.
473 * All scale factors are real valued.
474 * RHS_ScaleFactors <input> (RealVector)
475 * The array of RHS scale factors. These factors scale the rows.
476 * All scale factors are real valued.
477 *
478 * >>> Local variables:
479 * lSize (int)
480 * Local version of the size of the matrix.
481 * pElement (ElementPtr)
482 * Pointer to an element in the matrix.
483 * pExtOrder (int *)
484 * Pointer into either IntToExtRowMap or IntToExtColMap vector. Used to
485 * compensate for any row or column swaps that have been performed.
486 * ScaleFactor (RealNumber)
487 * The scale factor being used on the current row or column.
488 */
489
490 static void
ScaleComplexMatrix(Matrix,RHS_ScaleFactors,SolutionScaleFactors)491 ScaleComplexMatrix( Matrix, RHS_ScaleFactors, SolutionScaleFactors )
492
493 MatrixPtr Matrix;
494 RealVector RHS_ScaleFactors, SolutionScaleFactors;
495 {
496 ElementPtr pElement;
497 int I, lSize, *pExtOrder;
498 RealNumber ScaleFactor;
499
500 /* Begin `ScaleComplexMatrix'. */
501 lSize = Matrix->Size;
502
503 /* Scale Rows */
504 pExtOrder = &Matrix->IntToExtRowMap[1];
505 for (I = 1; I <= lSize; I++)
506 {
507 if ((ScaleFactor = RHS_ScaleFactors[*(pExtOrder++)]) != 1.0)
508 {
509 pElement = Matrix->FirstInRow[I];
510
511 while (pElement != NULL)
512 {
513 pElement->Real *= ScaleFactor;
514 pElement->Imag *= ScaleFactor;
515 pElement = pElement->NextInRow;
516 }
517 }
518 }
519
520 /* Scale Columns */
521 pExtOrder = &Matrix->IntToExtColMap[1];
522 for (I = 1; I <= lSize; I++)
523 {
524 if ((ScaleFactor = SolutionScaleFactors[*(pExtOrder++)]) != 1.0)
525 {
526 pElement = Matrix->FirstInCol[I];
527
528 while (pElement != NULL)
529 {
530 pElement->Real *= ScaleFactor;
531 pElement->Imag *= ScaleFactor;
532 pElement = pElement->NextInCol;
533 }
534 }
535 }
536 return;
537 }
538 #endif /* SCALING */
539
540
541
542
543
544
545
546
547 #if MULTIPLICATION
548 /*
549 * MATRIX MULTIPLICATION
550 *
551 * Multiplies matrix by solution vector to find source vector.
552 * Assumes matrix has not been factored. This routine can be used
553 * as a test to see if solutions are correct. It should not be used
554 * before spMNA_Preorder().
555 *
556 * >>> Arguments:
557 * Matrix <input> (char *)
558 * Pointer to the matrix.
559 * RHS <output> (RealVector)
560 * RHS is the right hand side. This is what is being solved for.
561 * Solution <input> (RealVector)
562 * Solution is the vector being multiplied by the matrix.
563 * iRHS <output> (RealVector)
564 * iRHS is the imaginary portion of the right hand side. This is
565 * what is being solved for.
566 * iSolution <input> (RealVector)
567 * iSolution is the imaginary portion of the vector being multiplied
568 * by the matrix.
569 */
570
571 void
spMultiply(MatrixPtr Matrix,RealVector RHS,RealVector Solution,RealVector iRHS,RealVector iSolution)572 spMultiply(MatrixPtr Matrix, RealVector RHS, RealVector Solution,
573 RealVector iRHS, RealVector iSolution)
574 {
575 ElementPtr pElement;
576 RealVector Vector;
577 RealNumber Sum;
578 int I, *pExtOrder;
579
580 /* Begin `spMultiply'. */
581 assert( IS_SPARSE( Matrix ) && !Matrix->Factored );
582 if (!Matrix->RowsLinked)
583 spcLinkRows(Matrix);
584 if (!Matrix->InternalVectorsAllocated)
585 spcCreateInternalVectors( Matrix );
586
587 if (Matrix->Complex)
588 {
589 ComplexMatrixMultiply( Matrix, RHS, Solution , iRHS, iSolution );
590 return;
591 }
592
593 /* Initialize Intermediate vector with reordered Solution vector. */
594 Vector = Matrix->Intermediate;
595 pExtOrder = &Matrix->IntToExtColMap[Matrix->Size];
596 for (I = Matrix->Size; I > 0; I--)
597 Vector[I] = Solution[*(pExtOrder--)];
598
599 pExtOrder = &Matrix->IntToExtRowMap[Matrix->Size];
600 for (I = Matrix->Size; I > 0; I--)
601 {
602 pElement = Matrix->FirstInRow[I];
603 Sum = 0.0;
604
605 while (pElement != NULL)
606 {
607 Sum += pElement->Real * Vector[pElement->Col];
608 pElement = pElement->NextInRow;
609 }
610 RHS[*pExtOrder--] = Sum;
611 }
612 return;
613 }
614 #endif /* MULTIPLICATION */
615
616
617
618
619
620
621
622 #if MULTIPLICATION
623 /*
624 * COMPLEX MATRIX MULTIPLICATION
625 *
626 * Multiplies matrix by solution vector to find source vector.
627 * Assumes matrix has not been factored. This routine can be used
628 * as a test to see if solutions are correct.
629 *
630 * >>> Arguments:
631 * Matrix <input> (char *)
632 * Pointer to the matrix.
633 * RHS <output> (RealVector)
634 * RHS is the right hand side. This is what is being solved for.
635 * Solution <input> (RealVector)
636 * Solution is the vector being multiplied by the matrix.
637 * iRHS <output> (RealVector)
638 * iRHS is the imaginary portion of the right hand side. This is
639 * what is being solved for.
640 * iSolution <input> (RealVector)
641 * iSolution is the imaginary portion of the vector being multiplied
642 * by the matrix.
643 */
644
645 static void
ComplexMatrixMultiply(MatrixPtr Matrix,RealVector RHS,RealVector Solution,RealVector iRHS,RealVector iSolution)646 ComplexMatrixMultiply( MatrixPtr Matrix, RealVector RHS, RealVector Solution , RealVector iRHS, RealVector iSolution )
647 {
648 ElementPtr pElement;
649 ComplexVector Vector;
650 ComplexNumber Sum;
651 int I, *pExtOrder;
652
653 /* Begin `ComplexMatrixMultiply'. */
654
655 /* Initialize Intermediate vector with reordered Solution vector. */
656 Vector = (ComplexVector)Matrix->Intermediate;
657 pExtOrder = &Matrix->IntToExtColMap[Matrix->Size];
658
659 for (I = Matrix->Size; I > 0; I--)
660 {
661 Vector[I].Real = Solution[*pExtOrder];
662 Vector[I].Imag = iSolution[*(pExtOrder--)];
663 }
664
665 pExtOrder = &Matrix->IntToExtRowMap[Matrix->Size];
666 for (I = Matrix->Size; I > 0; I--)
667 {
668 pElement = Matrix->FirstInRow[I];
669 Sum.Real = Sum.Imag = 0.0;
670
671 while (pElement != NULL)
672 {
673 /* Cmplx expression : Sum += Element * Vector[Col] */
674 CMPLX_MULT_ADD_ASSIGN( Sum, *pElement, Vector[pElement->Col] );
675 pElement = pElement->NextInRow;
676 }
677
678 RHS[*pExtOrder] = Sum.Real;
679 iRHS[*pExtOrder--] = Sum.Imag;
680 }
681 return;
682 }
683 #endif /* MULTIPLICATION */
684
685
686
687
688
689
690
691
692 #if MULTIPLICATION && TRANSPOSE
693 /*
694 * TRANSPOSED MATRIX MULTIPLICATION
695 *
696 * Multiplies transposed matrix by solution vector to find source vector.
697 * Assumes matrix has not been factored. This routine can be used
698 * as a test to see if solutions are correct. It should not be used
699 * before spMNA_Preorder().
700 *
701 * >>> Arguments:
702 * Matrix <input> (char *)
703 * Pointer to the matrix.
704 * RHS <output> (RealVector)
705 * RHS is the right hand side. This is what is being solved for.
706 * Solution <input> (RealVector)
707 * Solution is the vector being multiplied by the matrix.
708 * iRHS <output> (RealVector)
709 * iRHS is the imaginary portion of the right hand side. This is
710 * what is being solved for.
711 * iSolution <input> (RealVector)
712 * iSolution is the imaginary portion of the vector being multiplied
713 * by the matrix.
714 */
715
716 void
spMultTransposed(MatrixPtr Matrix,RealVector RHS,RealVector Solution,RealVector iRHS,RealVector iSolution)717 spMultTransposed(MatrixPtr Matrix, RealVector RHS, RealVector Solution,
718 RealVector iRHS, RealVector iSolution)
719 {
720 ElementPtr pElement;
721 RealVector Vector;
722 RealNumber Sum;
723 int I, *pExtOrder;
724
725 /* Begin `spMultTransposed'. */
726 assert( IS_SPARSE( Matrix ) && !Matrix->Factored );
727 if (!Matrix->InternalVectorsAllocated)
728 spcCreateInternalVectors( Matrix );
729
730 if (Matrix->Complex)
731 {
732 ComplexTransposedMatrixMultiply( Matrix, RHS, Solution , iRHS, iSolution );
733 return;
734 }
735
736 /* Initialize Intermediate vector with reordered Solution vector. */
737 Vector = Matrix->Intermediate;
738 pExtOrder = &Matrix->IntToExtRowMap[Matrix->Size];
739 for (I = Matrix->Size; I > 0; I--)
740 Vector[I] = Solution[*(pExtOrder--)];
741
742 pExtOrder = &Matrix->IntToExtColMap[Matrix->Size];
743 for (I = Matrix->Size; I > 0; I--)
744 {
745 pElement = Matrix->FirstInCol[I];
746 Sum = 0.0;
747
748 while (pElement != NULL)
749 {
750 Sum += pElement->Real * Vector[pElement->Row];
751 pElement = pElement->NextInCol;
752 }
753 RHS[*pExtOrder--] = Sum;
754 }
755 return;
756 }
757 #endif /* MULTIPLICATION && TRANSPOSE */
758
759
760
761
762
763
764
765 #if MULTIPLICATION && TRANSPOSE
766 /*
767 * COMPLEX TRANSPOSED MATRIX MULTIPLICATION
768 *
769 * Multiplies transposed matrix by solution vector to find source vector.
770 * Assumes matrix has not been factored. This routine can be used
771 * as a test to see if solutions are correct.
772 *
773 * >>> Arguments:
774 * Matrix <input> (char *)
775 * Pointer to the matrix.
776 * RHS <output> (RealVector)
777 * RHS is the right hand side. This is what is being solved for.
778 * Solution <input> (RealVector)
779 * Solution is the vector being multiplied by the matrix.
780 * iRHS <output> (RealVector)
781 * iRHS is the imaginary portion of the right hand side. This is
782 * what is being solved for.
783 * iSolution <input> (RealVector)
784 * iSolution is the imaginary portion of the vector being multiplied
785 * by the matrix.
786 */
787
788 static void
ComplexTransposedMatrixMultiply(MatrixPtr Matrix,RealVector RHS,RealVector Solution,RealVector iRHS,RealVector iSolution)789 ComplexTransposedMatrixMultiply( MatrixPtr Matrix, RealVector RHS, RealVector Solution , RealVector iRHS, RealVector iSolution )
790 {
791 ElementPtr pElement;
792 ComplexVector Vector;
793 ComplexNumber Sum;
794 int I, *pExtOrder;
795
796 /* Begin `ComplexMatrixMultiply'. */
797
798 /* Initialize Intermediate vector with reordered Solution vector. */
799 Vector = (ComplexVector)Matrix->Intermediate;
800 pExtOrder = &Matrix->IntToExtRowMap[Matrix->Size];
801
802 for (I = Matrix->Size; I > 0; I--)
803 {
804 Vector[I].Real = Solution[*pExtOrder];
805 Vector[I].Imag = iSolution[*(pExtOrder--)];
806 }
807
808 pExtOrder = &Matrix->IntToExtColMap[Matrix->Size];
809 for (I = Matrix->Size; I > 0; I--)
810 {
811 pElement = Matrix->FirstInCol[I];
812 Sum.Real = Sum.Imag = 0.0;
813
814 while (pElement != NULL)
815 {
816 /* Cmplx expression : Sum += Element * Vector[Row] */
817 CMPLX_MULT_ADD_ASSIGN( Sum, *pElement, Vector[pElement->Row] );
818 pElement = pElement->NextInCol;
819 }
820
821 RHS[*pExtOrder] = Sum.Real;
822 iRHS[*pExtOrder--] = Sum.Imag;
823 }
824 return;
825 }
826 #endif /* MULTIPLICATION && TRANSPOSE */
827
828
829
830
831
832
833
834
835 #if DETERMINANT
836 /*
837 * CALCULATE DETERMINANT
838 *
839 * This routine is capable of calculating the determinant of the
840 * matrix once the LU factorization has been performed. Hence, only
841 * use this routine after spFactor() and before spClear().
842 * The determinant equals the product of all the diagonal elements of
843 * the lower triangular matrix L, except that this product may need
844 * negating. Whether the product or the negative product equals the
845 * determinant is determined by the number of row and column
846 * interchanges performed. Note that the determinants of matrices can
847 * be very large or very small. On large matrices, the determinant
848 * can be far larger or smaller than can be represented by a floating
849 * point number. For this reason the determinant is scaled to a
850 * reasonable value and the logarithm of the scale factor is returned.
851 *
852 * >>> Arguments:
853 * Matrix <input> (char *)
854 * A pointer to the matrix for which the determinant is desired.
855 * pExponent <output> (int *)
856 * The logarithm base 10 of the scale factor for the determinant. To find
857 * the actual determinant, Exponent should be added to the exponent of
858 * Determinant.
859 * pDeterminant <output> (RealNumber *)
860 * The real portion of the determinant. This number is scaled to be
861 * greater than or equal to 1.0 and less than 10.0.
862 * piDeterminant <output> (RealNumber *)
863 * The imaginary portion of the determinant. When the matrix is real
864 * this pointer need not be supplied, nothing will be returned. This
865 * number is scaled to be greater than or equal to 1.0 and less than 10.0.
866 *
867 * >>> Local variables:
868 * Norm (RealNumber)
869 * L-infinity norm of a complex number.
870 * Size (int)
871 * Local storage for Matrix->Size. Placed in a for speed.
872 * Temp (RealNumber)
873 * Temporary storage for real portion of determinant.
874 */
875
void
spDeterminant(MatrixPtr Matrix, int *pExponent, RealNumber *pDeterminant,
    RealNumber *piDeterminant)
{
    int I, Size;
    RealNumber Norm, nr, ni;
    ComplexNumber Pivot, cDeterminant;

/* L-infinity norm of a complex number; clobbers nr and ni. */
#define NORM(a) (nr = ABS((a).Real), ni = ABS((a).Imag), MAX (nr,ni))

    /* Begin `spDeterminant'. */
    /* Only valid on a successfully factored matrix (after spFactor(),
     * before spClear()); the determinant is the product of the
     * reciprocals of the diagonal of L, negated if an odd number of
     * row/column interchanges was performed. */
    assert( IS_SPARSE( Matrix ) && IS_FACTORED(Matrix) );
    *pExponent = 0;

    /* A singular matrix has determinant zero by definition. */
    if (Matrix->Error == spSINGULAR)
    {
        *pDeterminant = 0.0;
        if (Matrix->Complex) *piDeterminant = 0.0;
        return;
    }

    Size = Matrix->Size;
    I = 0;

    if (Matrix->Complex)        /* Complex Case. */
    {
        cDeterminant.Real = 1.0;
        cDeterminant.Imag = 0.0;

        while (++I <= Size)
        {
            /* Multiply in 1/Diag[I]; the stored diagonal of L holds
             * the reciprocal pivots, hence the reciprocal here. */
            CMPLX_RECIPROCAL( Pivot, *Matrix->Diag[I] );
            CMPLX_MULT_ASSIGN( cDeterminant, Pivot );

            /* Scale Determinant. */
            /* Keep the running product within [1e-12, 1e12) so it
             * neither overflows nor underflows; the powers of ten
             * removed are accumulated in *pExponent. */
            Norm = NORM( cDeterminant );
            if (Norm != 0.0)
            {
                while (Norm >= 1.0e12)
                {
                    cDeterminant.Real *= 1.0e-12;
                    cDeterminant.Imag *= 1.0e-12;
                    *pExponent += 12;
                    Norm = NORM( cDeterminant );
                }
                while (Norm < 1.0e-12)
                {
                    cDeterminant.Real *= 1.0e12;
                    cDeterminant.Imag *= 1.0e12;
                    *pExponent -= 12;
                    Norm = NORM( cDeterminant );
                }
            }
        }

        /* Scale Determinant again, this time to be between 1.0 <= x < 10.0. */
        Norm = NORM( cDeterminant );
        if (Norm != 0.0)
        {
            while (Norm >= 10.0)
            {
                cDeterminant.Real *= 0.1;
                cDeterminant.Imag *= 0.1;
                (*pExponent)++;
                Norm = NORM( cDeterminant );
            }
            while (Norm < 1.0)
            {
                cDeterminant.Real *= 10.0;
                cDeterminant.Imag *= 10.0;
                (*pExponent)--;
                Norm = NORM( cDeterminant );
            }
        }
        /* An odd number of interchanges flips the determinant's sign. */
        if (Matrix->NumberOfInterchangesIsOdd)
            CMPLX_NEGATE( cDeterminant );

        *pDeterminant = cDeterminant.Real;
        *piDeterminant = cDeterminant.Imag;
    }
    else
    {
        /* Real Case. */
        *pDeterminant = 1.0;

        while (++I <= Size)
        {
            /* Diag holds reciprocal pivots, so divide rather than multiply. */
            *pDeterminant /= Matrix->Diag[I]->Real;

            /* Scale Determinant. */
            /* Same range-limiting as the complex case above. */
            if (*pDeterminant != 0.0)
            {
                while (ABS(*pDeterminant) >= 1.0e12)
                {
                    *pDeterminant *= 1.0e-12;
                    *pExponent += 12;
                }
                while (ABS(*pDeterminant) < 1.0e-12)
                {
                    *pDeterminant *= 1.0e12;
                    *pExponent -= 12;
                }
            }
        }

        /* Scale Determinant again, this time to be between 1.0 <= x <
           10.0. */
        if (*pDeterminant != 0.0)
        {
            while (ABS(*pDeterminant) >= 10.0)
            {
                *pDeterminant *= 0.1;
                (*pExponent)++;
            }
            while (ABS(*pDeterminant) < 1.0)
            {
                *pDeterminant *= 10.0;
                (*pExponent)--;
            }
        }
        /* An odd number of interchanges flips the determinant's sign. */
        if (Matrix->NumberOfInterchangesIsOdd)
            *pDeterminant = -*pDeterminant;
    }
}
1000 #endif /* DETERMINANT */
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010 #if STRIP
1011 /*
1012 * STRIP FILL-INS FROM MATRIX
1013 *
1014 * Strips the matrix of all fill-ins.
1015 *
1016 * >>> Arguments:
1017 * Matrix <input> (char *)
1018 * Pointer to the matrix to be stripped.
1019 *
1020 * >>> Local variables:
1021 * pElement (ElementPtr)
1022 * Pointer that is used to step through the matrix.
1023 * ppElement (ElementPtr *)
1024 * Pointer to the location of an ElementPtr. This location will be
1025 * updated if a fill-in is stripped from the matrix.
1026 * pFillin (ElementPtr)
1027 * Pointer used to step through the lists of fill-ins while marking them.
1028 * pLastFillin (ElementPtr)
1029 * A pointer to the last fill-in in the list. Used to terminate a loop.
1030 * pListNode (struct FillinListNodeStruct *)
1031 * A pointer to a node in the FillinList linked-list.
1032 */
1033
1034 void
spStripFills(MatrixPtr Matrix)1035 spStripFills(MatrixPtr Matrix)
1036 {
1037 struct FillinListNodeStruct *pListNode;
1038
1039 /* Begin `spStripFills'. */
1040 assert( IS_SPARSE( Matrix ) );
1041 if (Matrix->Fillins == 0) return;
1042 Matrix->NeedsOrdering = YES;
1043 Matrix->Elements -= Matrix->Fillins;
1044 Matrix->Fillins = 0;
1045
1046 /* Mark the fill-ins. */
1047 {
1048 ElementPtr pFillin, pLastFillin;
1049
1050 pListNode = Matrix->LastFillinListNode = Matrix->FirstFillinListNode;
1051 Matrix->FillinsRemaining = pListNode->NumberOfFillinsInList;
1052 Matrix->NextAvailFillin = pListNode->pFillinList;
1053
1054 while (pListNode != NULL)
1055 {
1056 pFillin = pListNode->pFillinList;
1057 pLastFillin = &(pFillin[ pListNode->NumberOfFillinsInList - 1 ]);
1058 while (pFillin <= pLastFillin)
1059 (pFillin++)->Row = 0;
1060 pListNode = pListNode->Next;
1061 }
1062 }
1063
1064 /* Unlink fill-ins by searching for elements marked with Row = 0. */
1065 {
1066 ElementPtr pElement, *ppElement;
1067 int I, Size = Matrix->Size;
1068
1069 /* Unlink fill-ins in all columns. */
1070 for (I = 1; I <= Size; I++)
1071 {
1072 ppElement = &(Matrix->FirstInCol[I]);
1073 while ((pElement = *ppElement) != NULL)
1074 {
1075 if (pElement->Row == 0)
1076 {
1077 *ppElement = pElement->NextInCol; /* Unlink fill-in. */
1078 if (Matrix->Diag[pElement->Col] == pElement)
1079 Matrix->Diag[pElement->Col] = NULL;
1080 }
1081 else
1082 ppElement = &pElement->NextInCol; /* Skip element. */
1083 }
1084 }
1085
1086 /* Unlink fill-ins in all rows. */
1087 for (I = 1; I <= Size; I++)
1088 {
1089 ppElement = &(Matrix->FirstInRow[I]);
1090 while ((pElement = *ppElement) != NULL)
1091 {
1092 if (pElement->Row == 0)
1093 *ppElement = pElement->NextInRow; /* Unlink fill-in. */
1094 else
1095 ppElement = &pElement->NextInRow; /* Skip element. */
1096 }
1097 }
1098 }
1099 return;
1100 }
1101
1102 /* Same as above, but strips entire matrix without destroying the
1103 * frame. This assumes that the matrix will be replaced with one of
1104 * the same size. */
1105 void
spStripMatrix(MatrixPtr Matrix)1106 spStripMatrix(MatrixPtr Matrix)
1107 {
1108 /* Begin `spStripMatrix'. */
1109 assert( IS_SPARSE( Matrix ) );
1110 if (Matrix->Elements == 0) return;
1111 Matrix->RowsLinked = NO;
1112 Matrix->NeedsOrdering = YES;
1113 Matrix->Elements = 0;
1114 Matrix->Originals = 0;
1115 Matrix->Fillins = 0;
1116
1117 /* Reset the element lists. */
1118 {
1119 struct ElementListNodeStruct *pListNode;
1120
1121 pListNode = Matrix->LastElementListNode = Matrix->FirstElementListNode;
1122 Matrix->ElementsRemaining = pListNode->NumberOfElementsInList;
1123 Matrix->NextAvailElement = pListNode->pElementList;
1124 }
1125
1126 /* Reset the fill-in lists. */
1127 {
1128 struct FillinListNodeStruct *pListNode;
1129
1130 pListNode = Matrix->LastFillinListNode = Matrix->FirstFillinListNode;
1131 Matrix->FillinsRemaining = pListNode->NumberOfFillinsInList;
1132 Matrix->NextAvailFillin = pListNode->pFillinList;
1133 }
1134
1135 /* Reset the Row, Column and Diag pointers */
1136 {
1137 int I, Size = Matrix->Size;
1138 for (I = 1; I <= Size; I++)
1139 {
1140 Matrix->FirstInRow[I] = NULL;
1141 Matrix->FirstInCol[I] = NULL;
1142 Matrix->Diag[I] = NULL;
1143 }
1144 }
1145 }
1146 #endif
1147
1148
1149
1150
1151
1152
1153
1154 #if TRANSLATE && DELETE
1155 /*
1156 * DELETE A ROW AND COLUMN FROM THE MATRIX
1157 *
1158 * Deletes a row and a column from a matrix.
1159 *
1160 * Sparse will abort if an attempt is made to delete a row or column that
1161 * doesn't exist.
1162 *
1163 * >>> Arguments:
1164 * Matrix <input> (char *)
1165 * Pointer to the matrix in which the row and column are to be deleted.
1166 * Row <input> (int)
1167 * Row to be deleted.
1168 * Col <input> (int)
1169 * Column to be deleted.
1170 *
1171 * >>> Local variables:
1172 * ExtCol (int)
1173 * The external column that is being deleted.
1174 * ExtRow (int)
1175 * The external row that is being deleted.
1176 * pElement (ElementPtr)
1177 * Pointer to an element in the matrix. Used when scanning rows and
1178 * columns in order to eliminate elements from the last row or column.
1179 * ppElement (ElementPtr *)
1180 * Pointer to the location of an ElementPtr. This location will be
1181 * filled with a NULL pointer if it is the new last element in its row
1182 * or column.
 *  pLastElement  (ElementPtr)
 *      Pointer to an element in the last row or column of the matrix.
1185 * Size (int)
1186 * The local version Matrix->Size, the size of the matrix.
1187 */
1188
1189 void
spDeleteRowAndCol(MatrixPtr Matrix,int Row,int Col)1190 spDeleteRowAndCol(MatrixPtr Matrix, int Row, int Col)
1191 {
1192 ElementPtr pElement, *ppElement, pLastElement;
1193 int Size, ExtRow, ExtCol;
1194 ElementPtr spcFindElementInCol();
1195
1196 /* Begin `spDeleteRowAndCol'. */
1197
1198 assert( IS_SPARSE(Matrix) && Row > 0 && Col > 0 );
1199 assert( Row <= Matrix->ExtSize && Col <= Matrix->ExtSize );
1200
1201 Size = Matrix->Size;
1202 ExtRow = Row;
1203 ExtCol = Col;
1204 if (!Matrix->RowsLinked)
1205 spcLinkRows( Matrix );
1206
1207 Row = Matrix->ExtToIntRowMap[Row];
1208 Col = Matrix->ExtToIntColMap[Col];
1209 assert( Row > 0 && Col > 0 );
1210
1211 /* Move Row so that it is the last row in the matrix. */
1212 if (Row != Size) spcRowExchange( Matrix, Row, Size );
1213
1214 /* Move Col so that it is the last column in the matrix. */
1215 if (Col != Size) spcColExchange( Matrix, Col, Size );
1216
1217 /* Correct Diag pointers. */
1218 if (Row == Col)
1219 SWAP( ElementPtr, Matrix->Diag[Row], Matrix->Diag[Size] );
1220 else {
1221 Matrix->Diag[Row] = spcFindElementInCol(Matrix,
1222 Matrix->FirstInCol+Row,
1223 Row, Row, NO );
1224 Matrix->Diag[Col] = spcFindElementInCol(Matrix,
1225 Matrix->FirstInCol+Col,
1226 Col, Col, NO );
1227 }
1228
1229 /* Delete last row and column of the matrix. */
1230 /* Break the column links to every element in the last row. */
1231 pLastElement = Matrix->FirstInRow[ Size ];
1232 while (pLastElement != NULL) {
1233 ppElement = &(Matrix->FirstInCol[ pLastElement->Col ]);
1234 while ((pElement = *ppElement) != NULL) {
1235 if (pElement == pLastElement)
1236 *ppElement = NULL; /* Unlink last element in column. */
1237 else
1238 ppElement = &pElement->NextInCol; /* Skip element. */
1239 }
1240 pLastElement = pLastElement->NextInRow;
1241 }
1242
1243 /* Break the row links to every element in the last column. */
1244 pLastElement = Matrix->FirstInCol[ Size ];
1245 while (pLastElement != NULL) {
1246 ppElement = &(Matrix->FirstInRow[ pLastElement->Row ]);
1247 while ((pElement = *ppElement) != NULL) {
1248 if (pElement == pLastElement)
1249 *ppElement = NULL; /* Unlink last element in row. */
1250 else
1251 ppElement = &pElement->NextInRow; /* Skip element. */
1252 }
1253 pLastElement = pLastElement->NextInCol;
1254 }
1255
1256 /* Clean up some details. */
1257 Matrix->Size = Size - 1;
1258 Matrix->Diag[Size] = NULL;
1259 Matrix->FirstInRow[Size] = NULL;
1260 Matrix->FirstInCol[Size] = NULL;
1261 Matrix->CurrentSize--;
1262 Matrix->ExtToIntRowMap[ExtRow] = -1;
1263 Matrix->ExtToIntColMap[ExtCol] = -1;
1264 Matrix->NeedsOrdering = YES;
1265
1266 return;
1267 }
1268 #endif
1269
1270
1271
1272
1273
1274
1275
1276
1277 #if PSEUDOCONDITION
1278 /*
1279 * CALCULATE PSEUDOCONDITION
1280 *
1281 * Computes the magnitude of the ratio of the largest to the smallest
1282 * pivots. This quantity is an indicator of ill-conditioning in the
1283 * matrix. If this ratio is large, and if the matrix is scaled such
1284 * that uncertainties in the RHS and the matrix entries are
1285 * equilibrated, then the matrix is ill-conditioned. However, a
1286 * small ratio does not necessarily imply that the matrix is
1287 * well-conditioned. This routine must only be used after a matrix
1288 * has been factored by spOrderAndFactor() or spFactor() and before
1289 * it is cleared by spClear() or spInitialize(). The pseudocondition
1290 * is faster to compute than the condition number calculated by
1291 * spCondition(), but is not as informative.
1292 *
1293 * >>> Returns:
1294 * The magnitude of the ratio of the largest to smallest pivot used during
1295 * previous factorization. If the matrix was singular, zero is returned.
1296 *
1297 * >>> Arguments:
1298 * Matrix <input> (char *)
1299 * Pointer to the matrix. */
1300
1301 RealNumber
spPseudoCondition(MatrixPtr Matrix)1302 spPseudoCondition(MatrixPtr Matrix)
1303 {
1304 int I;
1305 ArrayOfElementPtrs Diag;
1306 RealNumber MaxPivot, MinPivot, Mag;
1307
1308 /* Begin `spPseudoCondition'. */
1309
1310 assert( IS_SPARSE(Matrix) && IS_FACTORED(Matrix) );
1311 if (Matrix->Error == spSINGULAR || Matrix->Error == spZERO_DIAG)
1312 return 0.0;
1313
1314 Diag = Matrix->Diag;
1315 MaxPivot = MinPivot = ELEMENT_MAG( Diag[1] );
1316 for (I = 2; I <= Matrix->Size; I++)
1317 {
1318 Mag = ELEMENT_MAG( Diag[I] );
1319 if (Mag > MaxPivot)
1320 MaxPivot = Mag;
1321 else if (Mag < MinPivot)
1322 MinPivot = Mag;
1323 }
1324 assert( MaxPivot > 0.0);
1325 return MaxPivot / MinPivot;
1326 }
1327 #endif
1328
1329
1330
1331
1332
1333
1334
1335
1336 #if CONDITION
1337 /*
1338 * ESTIMATE CONDITION NUMBER
1339 *
1340 * Computes an estimate of the condition number using a variation on
1341 * the LINPACK condition number estimation algorithm. This quantity
1342 * is an indicator of ill-conditioning in the matrix. To avoid
1343 * problems with overflow, the reciprocal of the condition number is
1344 * returned. If this number is small, and if the matrix is scaled
1345 * such that uncertainties in the RHS and the matrix entries are
 *  equilibrated, then the matrix is ill-conditioned.  If this
1347 * number is near one, the matrix is well conditioned. This routine
1348 * must only be used after a matrix has been factored by
1349 * spOrderAndFactor() or spFactor() and before it is cleared by
1350 * spClear() or spInitialize().
1351 *
 *  Unlike the LINPACK condition number estimator, this routine
1353 * returns the L infinity condition number. This is an artifact of
1354 * Sparse placing ones on the diagonal of the upper triangular matrix
1355 * rather than the lower. This difference should be of no
1356 * importance.
1357 *
1358 * References:
1359 * A.K. Cline, C.B. Moler, G.W. Stewart, J.H. Wilkinson. An estimate
1360 * for the condition number of a matrix. SIAM Journal on Numerical
1361 * Analysis. Vol. 16, No. 2, pages 368-375, April 1979.
1362 *
1363 * J.J. Dongarra, C.B. Moler, J.R. Bunch, G.W. Stewart. LINPACK
1364 * User's Guide. SIAM, 1979.
1365 *
1366 * Roger G. Grimes, John G. Lewis. Condition number estimation for
1367 * sparse matrices. SIAM Journal on Scientific and Statistical
1368 * Computing. Vol. 2, No. 4, pages 384-388, December 1981.
1369 *
1370 * Dianne Prost O'Leary. Estimating matrix condition numbers. SIAM
1371 * Journal on Scientific and Statistical Computing. Vol. 1, No. 2,
1372 * pages 205-209, June 1980.
1373 *
1374 * >>> Returns:
1375 * The reciprocal of the condition number. If the matrix was singular,
1376 * zero is returned.
1377 *
1378 * >>> Arguments:
1379 * Matrix <input> (char *)
1380 * Pointer to the matrix.
1381 * NormOfMatrix <input> (RealNumber)
1382 * The L-infinity norm of the unfactored matrix as computed by
1383 * spNorm().
1384 * pError <output> (int *)
1385 * Used to return error code.
1386 *
1387 * >>> Possible errors:
1388 * spSINGULAR
1389 * spNO_MEMORY */
1390
1391 RealNumber
spCondition(MatrixPtr Matrix,RealNumber NormOfMatrix,int * pError)1392 spCondition(MatrixPtr Matrix, RealNumber NormOfMatrix, int *pError)
1393 {
1394 ElementPtr pElement;
1395 RealVector T, Tm;
1396 int I, K, Row;
1397 ElementPtr pPivot;
1398 int Size;
1399 RealNumber E, Em, Wp, Wm, ASp, ASm, ASw, ASy, ASv, ASz, MaxY, ScaleFactor;
1400 RealNumber Linpack, OLeary, InvNormOfInverse, ComplexCondition();
1401 #define SLACK 1e4
1402
1403 /* Begin `spCondition'. */
1404
1405 assert( IS_SPARSE(Matrix) && IS_FACTORED(Matrix) );
1406 *pError = Matrix->Error;
1407 if (Matrix->Error >= spFATAL) return 0.0;
1408 if (NormOfMatrix == 0.0)
1409 {
1410 *pError = spSINGULAR;
1411 return 0.0;
1412 }
1413
1414 if (Matrix->Complex)
1415 return ComplexCondition( Matrix, NormOfMatrix, pError );
1416
1417 Size = Matrix->Size;
1418 T = Matrix->Intermediate;
1419 Tm = Matrix->Intermediate + Size;
1420 for (I = Size; I > 0; I--) T[I] = 0.0;
1421
1422 /*
1423 * Part 1. Ay = e.
1424 *
1425 * Solve Ay = LUy = e where e consists of +1 and -1 terms with the
1426 * sign chosen to maximize the size of w in Lw = e. Since the
1427 * terms in w can get very large, scaling is used to avoid
1428 * overflow. */
1429
1430 /* Forward elimination. Solves Lw = e while choosing e. */
1431 E = 1.0;
1432 for (I = 1; I <= Size; I++)
1433 {
1434 pPivot = Matrix->Diag[I];
1435 if (T[I] < 0.0) Em = -E; else Em = E;
1436 Wm = (Em + T[I]) * pPivot->Real;
1437 if (ABS(Wm) > SLACK)
1438 {
1439 ScaleFactor = 1.0 / MAX( SQR( SLACK ), ABS(Wm) );
1440 for (K = Size; K > 0; K--) T[K] *= ScaleFactor;
1441 E *= ScaleFactor;
1442 Em *= ScaleFactor;
1443 Wm = (Em + T[I]) * pPivot->Real;
1444 }
1445 Wp = (T[I] - Em) * pPivot->Real;
1446 ASp = ABS(T[I] - Em);
1447 ASm = ABS(Em + T[I]);
1448
1449 /* Update T for both values of W, minus value is placed in
1450 Tm. */
1451 pElement = pPivot->NextInCol;
1452 while (pElement != NULL)
1453 {
1454 Row = pElement->Row;
1455 Tm[Row] = T[Row] - (Wm * pElement->Real);
1456 T[Row] -= (Wp * pElement->Real);
1457 ASp += ABS(T[Row]);
1458 ASm += ABS(Tm[Row]);
1459 pElement = pElement->NextInCol;
1460 }
1461
1462 /* If minus value causes more growth, overwrite T with its
1463 values. */
1464 if (ASm > ASp)
1465 {
1466 T[I] = Wm;
1467 pElement = pPivot->NextInCol;
1468 while (pElement != NULL)
1469 {
1470 T[pElement->Row] = Tm[pElement->Row];
1471 pElement = pElement->NextInCol;
1472 }
1473 }
1474 else T[I] = Wp;
1475 }
1476
1477 /* Compute 1-norm of T, which now contains w, and scale ||T|| to
1478 1/SLACK. */
1479 for (ASw = 0.0, I = Size; I > 0; I--) ASw += ABS(T[I]);
1480 ScaleFactor = 1.0 / (SLACK * ASw);
1481 if (ScaleFactor < 0.5)
1482 {
1483 for (I = Size; I > 0; I--) T[I] *= ScaleFactor;
1484 E *= ScaleFactor;
1485 }
1486
1487 /* Backward Substitution. Solves Uy = w.*/
1488 for (I = Size; I >= 1; I--)
1489 {
1490 pElement = Matrix->Diag[I]->NextInRow;
1491 while (pElement != NULL)
1492 {
1493 T[I] -= pElement->Real * T[pElement->Col];
1494 pElement = pElement->NextInRow;
1495 }
1496 if (ABS(T[I]) > SLACK)
1497 {
1498 ScaleFactor = 1.0 / MAX( SQR( SLACK ), ABS(T[I]) );
1499 for (K = Size; K > 0; K--) T[K] *= ScaleFactor;
1500 E *= ScaleFactor;
1501 }
1502 }
1503
1504 /* Compute 1-norm of T, which now contains y, and scale ||T|| to
1505 1/SLACK. */
1506 for (ASy = 0.0, I = Size; I > 0; I--) ASy += ABS(T[I]);
1507 ScaleFactor = 1.0 / (SLACK * ASy);
1508 if (ScaleFactor < 0.5)
1509 {
1510 for (I = Size; I > 0; I--) T[I] *= ScaleFactor;
1511 ASy = 1.0 / SLACK;
1512 E *= ScaleFactor;
1513 }
1514
1515 /* Compute infinity-norm of T for O'Leary's estimate. */
1516 for (MaxY = 0.0, I = Size; I > 0; I--)
1517 if (MaxY < ABS(T[I])) MaxY = ABS(T[I]);
1518
1519 /*
1520 * Part 2.
1521 *
1522 * A* z = y where the * represents the transpose. Recall that A =
1523 * LU implies A* = U* L*. */
1524
1525 /* Forward elimination, U* v = y. */
1526 for (I = 1; I <= Size; I++)
1527 {
1528 pElement = Matrix->Diag[I]->NextInRow;
1529 while (pElement != NULL)
1530 {
1531 T[pElement->Col] -= T[I] * pElement->Real;
1532 pElement = pElement->NextInRow;
1533 }
1534 if (ABS(T[I]) > SLACK)
1535 {
1536 ScaleFactor = 1.0 / MAX( SQR( SLACK ), ABS(T[I]) );
1537 for (K = Size; K > 0; K--) T[K] *= ScaleFactor;
1538 ASy *= ScaleFactor;
1539 }
1540 }
1541
1542 /* Compute 1-norm of T, which now contains v, and scale ||T|| to 1/SLACK. */
1543 for (ASv = 0.0, I = Size; I > 0; I--) ASv += ABS(T[I]);
1544 ScaleFactor = 1.0 / (SLACK * ASv);
1545 if (ScaleFactor < 0.5)
1546 {
1547 for (I = Size; I > 0; I--) T[I] *= ScaleFactor;
1548 ASy *= ScaleFactor;
1549 }
1550
1551 /* Backward Substitution, L* z = v. */
1552 for (I = Size; I >= 1; I--)
1553 {
1554 pPivot = Matrix->Diag[I];
1555 pElement = pPivot->NextInCol;
1556 while (pElement != NULL)
1557 {
1558 T[I] -= pElement->Real * T[pElement->Row];
1559 pElement = pElement->NextInCol;
1560 }
1561 T[I] *= pPivot->Real;
1562 if (ABS(T[I]) > SLACK)
1563 {
1564 ScaleFactor = 1.0 / MAX( SQR( SLACK ), ABS(T[I]) );
1565 for (K = Size; K > 0; K--) T[K] *= ScaleFactor;
1566 ASy *= ScaleFactor;
1567 }
1568 }
1569
1570 /* Compute 1-norm of T, which now contains z. */
1571 for (ASz = 0.0, I = Size; I > 0; I--) ASz += ABS(T[I]);
1572
1573 Linpack = ASy / ASz;
1574 OLeary = E / MaxY;
1575 InvNormOfInverse = MIN( Linpack, OLeary );
1576 return InvNormOfInverse / NormOfMatrix;
1577 }
1578
1579
1580
1581
1582
1583 /*
1584 * ESTIMATE CONDITION NUMBER
1585 *
1586 * Complex version of spCondition().
1587 *
1588 * >>> Returns:
1589 * The reciprocal of the condition number.
1590 *
1591 * >>> Arguments:
1592 * Matrix <input> (MatrixPtr)
1593 * Pointer to the matrix.
1594 * NormOfMatrix <input> (RealNumber)
1595 * The L-infinity norm of the unfactored matrix as computed by
1596 * spNorm().
1597 * pError <output> (int *)
1598 * Used to return error code.
1599 *
1600 * >>> Possible errors:
1601 * spNO_MEMORY
1602 */
1603
1604 static RealNumber
ComplexCondition(Matrix,NormOfMatrix,pError)1605 ComplexCondition( Matrix, NormOfMatrix, pError )
1606
1607 MatrixPtr Matrix;
1608 RealNumber NormOfMatrix;
1609 int *pError;
1610 {
1611 ElementPtr pElement;
1612 ComplexVector T, Tm;
1613 int I, K, Row;
1614 ElementPtr pPivot;
1615 int Size;
1616 RealNumber E, Em, ASp, ASm, ASw, ASy, ASv, ASz, MaxY, ScaleFactor;
1617 RealNumber Linpack, OLeary, InvNormOfInverse;
1618 ComplexNumber Wp, Wm;
1619
1620 /* Begin `ComplexCondition'. */
1621
1622 Size = Matrix->Size;
1623 T = (ComplexVector)Matrix->Intermediate;
1624 Tm = SP_MALLOC( ComplexNumber, Size+1 );
1625 if (Tm == NULL)
1626 {
1627 *pError = spNO_MEMORY;
1628 return 0.0;
1629 }
1630 for (I = Size; I > 0; I--) T[I].Real = T[I].Imag = 0.0;
1631
1632 /*
1633 * Part 1. Ay = e.
1634 *
1635 * Solve Ay = LUy = e where e consists of +1 and -1 terms with the
1636 * sign chosen to maximize the size of w in Lw = e. Since the
1637 * terms in w can get very large, scaling is used to avoid
1638 * overflow. */
1639
1640 /* Forward elimination. Solves Lw = e while choosing e. */
1641 E = 1.0;
1642 for (I = 1; I <= Size; I++)
1643 {
1644 pPivot = Matrix->Diag[I];
1645 if (T[I].Real < 0.0) Em = -E; else Em = E;
1646 Wm = T[I];
1647 Wm.Real += Em;
1648 ASm = CMPLX_1_NORM( Wm );
1649 CMPLX_MULT_ASSIGN( Wm, *pPivot );
1650 if (CMPLX_1_NORM(Wm) > SLACK)
1651 {
1652 ScaleFactor = 1.0 / MAX( SQR( SLACK ), CMPLX_1_NORM(Wm) );
1653 for (K = Size; K > 0; K--) SCLR_MULT_ASSIGN( T[K], ScaleFactor );
1654 E *= ScaleFactor;
1655 Em *= ScaleFactor;
1656 ASm *= ScaleFactor;
1657 SCLR_MULT_ASSIGN( Wm, ScaleFactor );
1658 }
1659 Wp = T[I];
1660 Wp.Real -= Em;
1661 ASp = CMPLX_1_NORM( Wp );
1662 CMPLX_MULT_ASSIGN( Wp, *pPivot );
1663
1664 /* Update T for both values of W, minus value is placed in Tm. */
1665 pElement = pPivot->NextInCol;
1666 while (pElement != NULL)
1667 {
1668 Row = pElement->Row;
1669 /* Cmplx expr: Tm[Row] = T[Row] - (Wp * *pElement). */
1670 CMPLX_MULT_SUBT( Tm[Row], Wm, *pElement, T[Row] );
1671 /* Cmplx expr: T[Row] -= Wp * *pElement. */
1672 CMPLX_MULT_SUBT_ASSIGN( T[Row], Wm, *pElement );
1673 ASp += CMPLX_1_NORM(T[Row]);
1674 ASm += CMPLX_1_NORM(Tm[Row]);
1675 pElement = pElement->NextInCol;
1676 }
1677
1678 /* If minus value causes more growth, overwrite T with its
1679 values. */
1680 if (ASm > ASp)
1681 {
1682 T[I] = Wm;
1683 pElement = pPivot->NextInCol;
1684 while (pElement != NULL)
1685 {
1686 T[pElement->Row] = Tm[pElement->Row];
1687 pElement = pElement->NextInCol;
1688 }
1689 }
1690 else T[I] = Wp;
1691 }
1692
1693 /* Compute 1-norm of T, which now contains w, and scale ||T|| to
1694 1/SLACK. */
1695 for (ASw = 0.0, I = Size; I > 0; I--) ASw += CMPLX_1_NORM(T[I]);
1696 ScaleFactor = 1.0 / (SLACK * ASw);
1697 if (ScaleFactor < 0.5)
1698 {
1699 for (I = Size; I > 0; I--) SCLR_MULT_ASSIGN( T[I], ScaleFactor );
1700 E *= ScaleFactor;
1701 }
1702
1703 /* Backward Substitution. Solves Uy = w.*/
1704 for (I = Size; I >= 1; I--)
1705 {
1706 pElement = Matrix->Diag[I]->NextInRow;
1707 while (pElement != NULL)
1708 {
1709 /* Cmplx expr: T[I] -= T[pElement->Col] * *pElement. */
1710 CMPLX_MULT_SUBT_ASSIGN( T[I], T[pElement->Col], *pElement );
1711 pElement = pElement->NextInRow;
1712 }
1713 if (CMPLX_1_NORM(T[I]) > SLACK)
1714 {
1715 ScaleFactor = 1.0 / MAX( SQR( SLACK ), CMPLX_1_NORM(T[I]) );
1716 for (K = Size; K > 0; K--) SCLR_MULT_ASSIGN( T[K], ScaleFactor );
1717 E *= ScaleFactor;
1718 }
1719 }
1720
1721 /* Compute 1-norm of T, which now contains y, and scale ||T|| to
1722 1/SLACK. */
1723 for (ASy = 0.0, I = Size; I > 0; I--) ASy += CMPLX_1_NORM(T[I]);
1724 ScaleFactor = 1.0 / (SLACK * ASy);
1725 if (ScaleFactor < 0.5)
1726 {
1727 for (I = Size; I > 0; I--) SCLR_MULT_ASSIGN( T[I], ScaleFactor );
1728 ASy = 1.0 / SLACK;
1729 E *= ScaleFactor;
1730 }
1731
1732 /* Compute infinity-norm of T for O'Leary's estimate. */
1733 for (MaxY = 0.0, I = Size; I > 0; I--)
1734 if (MaxY < CMPLX_1_NORM(T[I])) MaxY = CMPLX_1_NORM(T[I]);
1735
1736 /* Part 2. A* z = y where the * represents the transpose. Recall
1737 * that A = LU implies A* = U* L*. */
1738
1739 /* Forward elimination, U* v = y. */
1740 for (I = 1; I <= Size; I++)
1741 {
1742 pElement = Matrix->Diag[I]->NextInRow;
1743 while (pElement != NULL)
1744 {
1745 /* Cmplx expr: T[pElement->Col] -= T[I] * *pElement. */
1746 CMPLX_MULT_SUBT_ASSIGN( T[pElement->Col], T[I], *pElement );
1747 pElement = pElement->NextInRow;
1748 }
1749 if (CMPLX_1_NORM(T[I]) > SLACK)
1750 {
1751 ScaleFactor = 1.0 / MAX( SQR( SLACK ), CMPLX_1_NORM(T[I]) );
1752 for (K = Size; K > 0; K--) SCLR_MULT_ASSIGN( T[K], ScaleFactor );
1753 ASy *= ScaleFactor;
1754 }
1755 }
1756
1757 /* Compute 1-norm of T, which now contains v, and scale ||T|| to
1758 1/SLACK. */
1759 for (ASv = 0.0, I = Size; I > 0; I--) ASv += CMPLX_1_NORM(T[I]);
1760 ScaleFactor = 1.0 / (SLACK * ASv);
1761 if (ScaleFactor < 0.5)
1762 {
1763 for (I = Size; I > 0; I--) SCLR_MULT_ASSIGN( T[I], ScaleFactor );
1764 ASy *= ScaleFactor;
1765 }
1766
1767 /* Backward Substitution, L* z = v. */
1768 for (I = Size; I >= 1; I--)
1769 {
1770 pPivot = Matrix->Diag[I];
1771 pElement = pPivot->NextInCol;
1772 while (pElement != NULL)
1773 {
1774 /* Cmplx expr: T[I] -= T[pElement->Row] * *pElement. */
1775 CMPLX_MULT_SUBT_ASSIGN( T[I], T[pElement->Row], *pElement );
1776 pElement = pElement->NextInCol;
1777 }
1778 CMPLX_MULT_ASSIGN( T[I], *pPivot );
1779 if (CMPLX_1_NORM(T[I]) > SLACK)
1780 {
1781 ScaleFactor = 1.0 / MAX( SQR( SLACK ), CMPLX_1_NORM(T[I]) );
1782 for (K = Size; K > 0; K--) SCLR_MULT_ASSIGN( T[K], ScaleFactor );
1783 ASy *= ScaleFactor;
1784 }
1785 }
1786
1787 /* Compute 1-norm of T, which now contains z. */
1788 for (ASz = 0.0, I = Size; I > 0; I--) ASz += CMPLX_1_NORM(T[I]);
1789
1790 SP_FREE( Tm );
1791
1792 Linpack = ASy / ASz;
1793 OLeary = E / MaxY;
1794 InvNormOfInverse = MIN( Linpack, OLeary );
1795 return InvNormOfInverse / NormOfMatrix;
1796 }
1797
1798
1799
1800
1801
1802 /*
1803 * L-INFINITY MATRIX NORM
1804 *
1805 * Computes the L-infinity norm of an unfactored matrix. It is a fatal
1806 * error to pass this routine a factored matrix.
1807 *
1808 * One difficulty is that the rows may not be linked.
1809 *
1810 * >>> Returns:
1811 * The largest absolute row sum of matrix.
1812 *
1813 * >>> Arguments:
1814 * Matrix <input> (char *)
1815 * Pointer to the matrix.
1816 */
1817
1818 RealNumber
spNorm(MatrixPtr Matrix)1819 spNorm(MatrixPtr Matrix)
1820 {
1821 ElementPtr pElement;
1822 int I;
1823 RealNumber Max = 0.0, AbsRowSum;
1824
1825 /* Begin `spNorm'. */
1826 assert( IS_SPARSE(Matrix) && !IS_FACTORED(Matrix) );
1827 if (!Matrix->RowsLinked) spcLinkRows( Matrix );
1828
1829 /* Compute row sums. */
1830 if (!Matrix->Complex)
1831 {
1832 for (I = Matrix->Size; I > 0; I--)
1833 {
1834 pElement = Matrix->FirstInRow[I];
1835 AbsRowSum = 0.0;
1836 while (pElement != NULL)
1837 {
1838 AbsRowSum += ABS( pElement->Real );
1839 pElement = pElement->NextInRow;
1840 }
1841 if (Max < AbsRowSum) Max = AbsRowSum;
1842 }
1843 }
1844 else
1845 {
1846 for (I = Matrix->Size; I > 0; I--)
1847 {
1848 pElement = Matrix->FirstInRow[I];
1849 AbsRowSum = 0.0;
1850 while (pElement != NULL)
1851 {
1852 AbsRowSum += CMPLX_1_NORM( *pElement );
1853 pElement = pElement->NextInRow;
1854 }
1855 if (Max < AbsRowSum) Max = AbsRowSum;
1856 }
1857 }
1858 return Max;
1859 }
1860 #endif /* CONDITION */
1861
1862
1863
1864
1865
1866
1867 #if STABILITY
1868 /*
1869 * STABILITY OF FACTORIZATION
1870 *
1871 * The following routines are used to gauge the stability of a
1872 * factorization. If the factorization is determined to be too
1873 * unstable, then the matrix should be reordered. The routines
1874 * compute quantities that are needed in the computation of a bound
1875 * on the error attributed to any one element in the matrix during
1876 * the factorization. In other words, there is a matrix E = [e_ij]
1877 * of error terms such that A+E = LU. This routine finds a bound on
1878 * |e_ij|. Erisman & Reid [1] showed that |e_ij| < 3.01 u rho m_ij,
1879 * where u is the machine rounding unit, rho = max a_ij where the max
1880 * is taken over every row i, column j, and step k, and m_ij is the
1881 * number of multiplications required in the computation of l_ij if i
1882 * > j or u_ij otherwise. Barlow [2] showed that rho < max_i || l_i
1883 * ||_p max_j || u_j ||_q where 1/p + 1/q = 1.
1884 *
1885 * The first routine finds the magnitude on the largest element in
1886 * the matrix. If the matrix has not yet been factored, the largest
1887 * element is found by direct search. If the matrix is factored, a
1888 * bound on the largest element in any of the reduced submatrices is
1889 * computed using Barlow with p = oo and q = 1. The ratio of these
1890 * two numbers is the growth, which can be used to determine if the
1891 * pivoting order is adequate. A large growth implies that
1892 * considerable error has been made in the factorization and that it
1893 * is probably a good idea to reorder the matrix. If a large growth
1894 * in encountered after using spFactor(), reconstruct the matrix and
1895 * refactor using spOrderAndFactor(). If a large growth is
1896 * encountered after using spOrderAndFactor(), refactor using
1897 * spOrderAndFactor() with the pivot threshold increased, say to 0.1.
1898 *
1899 * Using only the size of the matrix as an upper bound on m_ij and
1900 * Barlow's bound, the user can estimate the size of the matrix error
1901 * terms e_ij using the bound of Erisman and Reid. The second
1902 * routine computes a tighter bound (with more work) based on work by
1903 * Gear [3], |e_ij| < 1.01 u rho (t c^3 + (1 + t)c^2) where t is the
1904 * threshold and c is the maximum number of off-diagonal elements in
1905 * any row of L. The expensive part of computing this bound is
1906 * determining the maximum number of off-diagonals in L, which
1907 * changes only when the order of the matrix changes. This number is
1908 * computed and saved, and only recomputed if the matrix is
1909 * reordered.
1910 *
1911 * [1] A. M. Erisman, J. K. Reid. Monitoring the stability of the
1912 * triangular factorization of a sparse matrix. Numerische
1913 * Mathematik. Vol. 22, No. 3, 1974, pp 183-186.
1914 *
1915 * [2] J. L. Barlow. A note on monitoring the stability of triangular
 *      decomposition of sparse matrices.  "SIAM Journal on Scientific
1917 * and Statistical Computing." Vol. 7, No. 1, January 1986, pp 166-168.
1918 *
1919 * [3] I. S. Duff, A. M. Erisman, J. K. Reid. "Direct Methods for Sparse
1920 * Matrices." Oxford 1986. pp 99. */
1921
1922 /*
1923 * LARGEST ELEMENT IN MATRIX
1924 *
1925 * >>> Returns:
1926 * If matrix is not factored, returns the magnitude of the largest element in
1927 * the matrix. If the matrix is factored, a bound on the magnitude of the
1928 * largest element in any of the reduced submatrices is returned.
1929 *
1930 * >>> Arguments:
1931 * Matrix <input> (char *)
1932 * Pointer to the matrix.
1933 */
1934
1935 RealNumber
spLargestElement(MatrixPtr Matrix)1936 spLargestElement(MatrixPtr Matrix)
1937 {
1938 int I;
1939 RealNumber Mag, AbsColSum, Max = 0.0, MaxRow = 0.0, MaxCol = 0.0;
1940 RealNumber Pivot;
1941 ComplexNumber cPivot;
1942 ElementPtr pElement, pDiag;
1943
1944 /* Begin `spLargestElement'. */
1945 assert( IS_SPARSE(Matrix) );
1946
1947 if (Matrix->Factored && !Matrix->Complex)
1948 {
1949 if (Matrix->Error == spSINGULAR) return 0.0;
1950
1951 /* Find the bound on the size of the largest element over all
1952 factorization. */
1953 for (I = 1; I <= Matrix->Size; I++)
1954 {
1955 pDiag = Matrix->Diag[I];
1956
1957 /* Lower triangular matrix. */
1958 Pivot = 1.0 / pDiag->Real;
1959 Mag = ABS( Pivot );
1960 if (Mag > MaxRow) MaxRow = Mag;
1961 pElement = Matrix->FirstInRow[I];
1962 while (pElement != pDiag)
1963 {
1964 Mag = ABS( pElement->Real );
1965 if (Mag > MaxRow) MaxRow = Mag;
1966 pElement = pElement->NextInRow;
1967 }
1968
1969 /* Upper triangular matrix. */
1970 pElement = Matrix->FirstInCol[I];
1971 AbsColSum = 1.0; /* Diagonal of U is unity. */
1972 while (pElement != pDiag)
1973 {
1974 AbsColSum += ABS( pElement->Real );
1975 pElement = pElement->NextInCol;
1976 }
1977 if (AbsColSum > MaxCol) MaxCol = AbsColSum;
1978 }
1979 }
1980 else if (!Matrix->Complex)
1981 {
1982 for (I = 1; I <= Matrix->Size; I++)
1983 {
1984 pElement = Matrix->FirstInCol[I];
1985 while (pElement != NULL)
1986 {
1987 Mag = ABS( pElement->Real );
1988 if (Mag > Max) Max = Mag;
1989 pElement = pElement->NextInCol;
1990 }
1991 }
1992 return Max;
1993 }
1994 if (Matrix->Factored && Matrix->Complex)
1995 {
1996 if (Matrix->Error == spSINGULAR) return 0.0;
1997
1998 /* Find the bound on the size of the largest element over all
1999 factorization. */
2000 for (I = 1; I <= Matrix->Size; I++)
2001 {
2002 pDiag = Matrix->Diag[I];
2003
2004 /* Lower triangular matrix. */
2005 CMPLX_RECIPROCAL( cPivot, *pDiag );
2006 Mag = CMPLX_1_NORM( cPivot );
2007 if (Mag > MaxRow) MaxRow = Mag;
2008 pElement = Matrix->FirstInRow[I];
2009 while (pElement != pDiag)
2010 {
2011 Mag = CMPLX_1_NORM( *pElement );
2012 if (Mag > MaxRow) MaxRow = Mag;
2013 pElement = pElement->NextInRow;
2014 }
2015
2016 /* Upper triangular matrix. */
2017 pElement = Matrix->FirstInCol[I];
2018 AbsColSum = 1.0; /* Diagonal of U is unity. */
2019 while (pElement != pDiag)
2020 {
2021 AbsColSum += CMPLX_1_NORM( *pElement );
2022 pElement = pElement->NextInCol;
2023 }
2024 if (AbsColSum > MaxCol) MaxCol = AbsColSum;
2025 }
2026 }
2027 else if (Matrix->Complex)
2028 {
2029 for (I = 1; I <= Matrix->Size; I++)
2030 {
2031 pElement = Matrix->FirstInCol[I];
2032 while (pElement != NULL)
2033 {
2034 Mag = CMPLX_1_NORM( *pElement );
2035 if (Mag > Max) Max = Mag;
2036 pElement = pElement->NextInCol;
2037 }
2038 }
2039 return Max;
2040 }
2041 return MaxRow * MaxCol;
2042 }
2043
2044
2045
2046
2047 /*
2048 * MATRIX ROUNDOFF ERROR
2049 *
2050 * >>> Returns:
2051 * Returns a bound on the magnitude of the largest element in E = A - LU.
2052 *
2053 * >>> Arguments:
2054 * Matrix <input> (char *)
2055 * Pointer to the matrix.
2056 * Rho <input> (RealNumber)
2057 * The bound on the magnitude of the largest element in any of the
2058 * reduced submatrices. This is the number computed by the function
2059 * spLargestElement() when given a factored matrix. If this number is
2060 * negative, the bound will be computed automatically.
2061 */
2062
2063 RealNumber
spRoundoff(MatrixPtr Matrix,RealNumber Rho)2064 spRoundoff(MatrixPtr Matrix, RealNumber Rho)
2065 {
2066 ElementPtr pElement;
2067 int Count, I, MaxCount = 0;
2068 RealNumber Reid, Gear;
2069
2070 /* Begin `spRoundoff'. */
2071 assert( IS_SPARSE(Matrix) && IS_FACTORED(Matrix) );
2072
2073 /* Compute Barlow's bound if it is not given. */
2074 if (Rho < 0.0) Rho = spLargestElement( Matrix );
2075
2076 /* Find the maximum number of off-diagonals in L if not previously computed. */
2077 if (Matrix->MaxRowCountInLowerTri < 0)
2078 {
2079 for (I = Matrix->Size; I > 0; I--)
2080 {
2081 pElement = Matrix->FirstInRow[I];
2082 Count = 0;
2083 while (pElement->Col < I)
2084 {
2085 Count++;
2086 pElement = pElement->NextInRow;
2087 }
2088 if (Count > MaxCount) MaxCount = Count;
2089 }
2090 Matrix->MaxRowCountInLowerTri = MaxCount;
2091 }
2092 else MaxCount = Matrix->MaxRowCountInLowerTri;
2093
2094 /* Compute error bound. */
2095 Gear = 1.01*((MaxCount + 1) * Matrix->RelThreshold + 1.0) * SQR(MaxCount);
2096 Reid = 3.01 * Matrix->Size;
2097
2098 if (Gear < Reid)
2099 return (MACHINE_RESOLUTION * Rho * Gear);
2100 else
2101 return (MACHINE_RESOLUTION * Rho * Reid);
2102 }
2103 #endif
2104
2105
2106
2107
2108
2109
2110
2111
2112 #if DOCUMENTATION
2113 /*
2114 * SPARSE ERROR MESSAGE
2115 *
 *  This routine prints a short message to a stream describing the
 *  error state of sparse.  No message is produced if there is no error.
2118 *
2119 * >>> Arguments:
 *  Matrix  <input>  (MatrixPtr)
2121 * Matrix for which the error message is to be printed.
2122 * Stream <input> (FILE *)
2123 * Stream to which the error message is to be printed.
2124 * Originator <input> (char *)
2125 * Name of originator of error message. If NULL, `sparse' is used.
2126 * If zero-length string, no originator is printed.
2127 */
2128
2129 void
spErrorMessage(MatrixPtr Matrix,FILE * Stream,char * Originator)2130 spErrorMessage(MatrixPtr Matrix, FILE *Stream, char *Originator)
2131 {
2132 int Row, Col, Error;
2133
2134 /* Begin `spErrorMessage'. */
2135 if (Matrix == NULL)
2136 Error = spNO_MEMORY;
2137 else
2138 {
2139 assert(Matrix->ID == SPARSE_ID);
2140 Error = Matrix->Error;
2141 }
2142
2143 if (Error == spOKAY) return;
2144 if (Originator == NULL) Originator = "sparse";
2145 if (Originator[0] != '\0') fprintf( Stream, "%s: ", Originator);
2146 if (Error >= spFATAL)
2147 fprintf( Stream, "fatal error, ");
2148 else
2149 fprintf( Stream, "warning, ");
2150
2151 /* Print particular error message. Do not use switch statement
2152 * because error codes may not be unique. */
2153 if (Error == spPANIC)
2154 fprintf( Stream, "Sparse called improperly.\n");
2155 else if (Error == spNO_MEMORY)
2156 fprintf( Stream, "insufficient memory available.\n");
2157 else if (Error == spSINGULAR)
2158 {
2159 spWhereSingular( Matrix, &Row, &Col );
2160 fprintf( Stream, "singular matrix detected at row %d and column %d.\n",
2161 Row, Col);
2162 }
2163 else if (Error == spZERO_DIAG)
2164 {
2165 spWhereSingular( Matrix, &Row, &Col );
2166 fprintf( Stream, "zero diagonal detected at row %d and column %d.\n",
2167 Row, Col);
2168 }
2169 else if (Error == spSMALL_PIVOT)
2170 {
2171 fprintf( Stream,
2172 "unable to find a pivot that is larger than absolute threshold.\n");
2173 }
2174 else
2175 {
2176 abort();
2177 }
2178 return;
2179 }
2180 #endif /* DOCUMENTATION */
2181