1 /*  -- translated by f2c (version 20100827).
2    You must link the resulting object file with libf2c:
3 	on Microsoft Windows system, link with libf2c.lib;
4 	on Linux or Unix systems, link with .../path/to/libf2c.a -lm
5 	or, if you install libf2c.a in a standard place, with -lf2c -lm
6 	-- in that order, at the end of the command line, as in
7 		cc *.o -lf2c -lm
8 	Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
9 
10 		http://www.netlib.org/f2c/libf2c.zip
11 */
12 
13 #include "../config.h"
14 #include "f2c.h"
15 
16 #if BLAS_COMPLEX_FUNCTIONS_AS_ROUTINES
/** Adapts the routine-style zdotu_ (unconjugated complex dot product whose
 *  result is written through a hidden first pointer argument) to a
 *  function-style call that returns the doublecomplex value directly.
 *
 *  n          - number of vector elements
 *  x, incx    - first vector and its stride
 *  y, incy    - second vector and its stride
 *
 *  NOTE: n is blasint* (not int*) so the type matches both the zdotu_
 *  prototype below and the blasint counters passed by the callers; with
 *  64-bit blasint (INTERFACE64) an int* here would be the wrong type. */
doublecomplex zdotu_fun(blasint *n, doublecomplex *x, blasint *incx, doublecomplex *y, blasint *incy) {
    /* Routine-style BLAS signature: result returned via the first argument. */
    extern void zdotu_(doublecomplex *, blasint *, doublecomplex *, blasint *, doublecomplex *, blasint *);
    doublecomplex result;
    zdotu_(&result, n, x, incx, y, incy);
    return result;
}
23 #define zdotu_ zdotu_fun
24 
/** Adapts the routine-style zdotc_ (conjugated complex dot product whose
 *  result is written through a hidden first pointer argument) to a
 *  function-style call that returns the doublecomplex value directly.
 *
 *  n          - number of vector elements
 *  x, incx    - first vector (conjugated) and its stride
 *  y, incy    - second vector and its stride
 *
 *  NOTE: n is blasint* (not int*) so the type matches both the zdotc_
 *  prototype below and the blasint counters passed by the callers; with
 *  64-bit blasint (INTERFACE64) an int* here would be the wrong type. */
doublecomplex zdotc_fun(blasint *n, doublecomplex *x, blasint *incx, doublecomplex *y, blasint *incy) {
    /* Routine-style BLAS signature: result returned via the first argument. */
    extern void zdotc_(doublecomplex *, blasint *, doublecomplex *, blasint *, doublecomplex *, blasint *);
    doublecomplex result;
    zdotc_(&result, n, x, incx, y, incy);
    return result;
}
31 #define zdotc_ zdotc_fun
32 #endif
33 
34 #if LAPACK_BLAS_COMPLEX_FUNCTIONS_AS_ROUTINES
/** Adapts the routine-style zladiv_ (robust complex division a/b, result
 *  written through a hidden first pointer argument) to a function-style
 *  call that returns the doublecomplex quotient directly. */
doublecomplex zladiv_fun(doublecomplex *a, doublecomplex *b) {
    /* Routine-style LAPACK signature: result returned via the first argument. */
    extern void zladiv_(doublecomplex *, doublecomplex *, doublecomplex *);
    doublecomplex result;
    zladiv_(&result, a, b);
    return result;
}
41 #define zladiv_ zladiv_fun
42 #endif
43 
/* Table of constant values */

static blasint c__1 = 1;	/* unit stride passed by address to BLAS vector calls */
47 
48 /** RELAPACK_ZTRSYL_REC2 solves the complex Sylvester matrix equation (unblocked algorithm)
49  *
50  * This routine is an exact copy of LAPACK's ztrsyl.
51  * It serves as an unblocked kernel in the recursive algorithms.
52  * */
/* Subroutine */ void RELAPACK_ztrsyl_rec2(char *trana, char *tranb, int
	*isgn, blasint *m, blasint *n, doublecomplex *a, blasint *lda,
	doublecomplex *b, blasint *ldb, doublecomplex *c__, blasint *ldc,
	double *scale, blasint *info, ftnlen trana_len, ftnlen tranb_len)
{
    /*
     * Arguments mirror LAPACK ZTRSYL (this routine is an exact copy of it):
     * TRANA/TRANB select op(A)/op(B) ("N" = no transpose, "C" = conjugate
     * transpose), ISGN is +1 or -1, A (m x m) and B (n x n) are triangular,
     * C (m x n) is overwritten with the solution X, and SCALE (<= 1) is the
     * factor applied to C to avoid overflow.  INFO = 1 signals that some
     * near-singular diagonal entries were perturbed to smin.
     */

    /* System generated locals */
    blasint a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
	    i__3, i__4;
    double d__1, d__2;
    doublecomplex z__1, z__2, z__3, z__4;

    /* Builtin functions */
    double d_imag(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    static blasint j, k, l;
    static doublecomplex a11;
    static double db;
    static doublecomplex x11;
    static double da11;
    static doublecomplex vec;
    static double dum[1], eps, sgn, smin;
    static doublecomplex suml, sumr;
    extern blasint lsame_(char *, char *, ftnlen, ftnlen);
    /* Double Complex */ doublecomplex zdotc_(blasint *,
	    doublecomplex *, blasint *, doublecomplex *, blasint *), zdotu_(
	    blasint *, doublecomplex *, blasint *,
	    doublecomplex *, blasint *);
    extern /* Subroutine */ blasint dlabad_(double *, double *);
    extern double dlamch_(char *, ftnlen);
    static double scaloc;
    extern /* Subroutine */ blasint xerbla_(char *, blasint *, ftnlen);
    extern double zlange_(char *, blasint *, blasint *, doublecomplex *,
	    blasint *, double *, ftnlen);
    static double bignum;
    extern /* Subroutine */ blasint zdscal_(blasint *, double *,
	    doublecomplex *, blasint *);
    /* Double Complex */ doublecomplex zladiv_(doublecomplex *,
	     doublecomplex *);
    static blasint notrna, notrnb;
    static double smlnum;

    /* Parameter adjustments: shift to 1-based Fortran-style indexing. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;

    /* Function Body */

    /* Decode and validate the input arguments. */
    notrna = lsame_(trana, "N", (ftnlen)1, (ftnlen)1);
    notrnb = lsame_(tranb, "N", (ftnlen)1, (ftnlen)1);
    *info = 0;
    if (! notrna && ! lsame_(trana, "C", (ftnlen)1, (ftnlen)1)) {
	*info = -1;
    } else if (! notrnb && ! lsame_(tranb, "C", (ftnlen)1, (ftnlen)1)) {
	*info = -2;
    } else if (*isgn != 1 && *isgn != -1) {
	*info = -3;
    } else if (*m < 0) {
	*info = -4;
    } else if (*n < 0) {
	*info = -5;
    } else if (*lda < max(1,*m)) {
	*info = -7;
    } else if (*ldb < max(1,*n)) {
	*info = -9;
    } else if (*ldc < max(1,*m)) {
	*info = -11;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZTRSY2", &i__1, (ftnlen)6);
	return;
    }

    /* Quick return if possible. */
    *scale = 1.;
    if (*m == 0 || *n == 0) {
	return;
    }

    /* Set constants to control overflow. */
    eps = dlamch_("P", (ftnlen)1);
    smlnum = dlamch_("S", (ftnlen)1);
    bignum = 1. / smlnum;
    dlabad_(&smlnum, &bignum);
    smlnum = smlnum * (double) (*m * *n) / eps;
    bignum = 1. / smlnum;
/* Computing MAX */
    d__1 = smlnum, d__2 = eps * zlange_("M", m, m, &a[a_offset], lda, dum, (
	    ftnlen)1), d__1 = max(d__1,d__2), d__2 = eps * zlange_("M", n, n,
	    &b[b_offset], ldb, dum, (ftnlen)1);
    smin = max(d__1,d__2);
    sgn = (double) (*isgn);
    if (notrna && notrnb) {

	/* Case 1: solve A*X + ISGN*X*B = scale*C.  X(K,L) is computed
	   column by column, sweeping each column bottom-up. */
	i__1 = *n;
	for (l = 1; l <= i__1; ++l) {
	    for (k = *m; k >= 1; --k) {
		i__2 = *m - k;
/* Computing MIN */
		i__3 = k + 1;
/* Computing MIN */
		i__4 = k + 1;
		z__1 = zdotu_(&i__2, &a[k + min(i__3,*m) * a_dim1], lda, &c__[
			min(i__4,*m) + l * c_dim1], &c__1);
		suml.r = z__1.r, suml.i = z__1.i;
		i__2 = l - 1;
		z__1 = zdotu_(&i__2, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1]
			, &c__1);
		sumr.r = z__1.r, sumr.i = z__1.i;
		i__2 = k + l * c_dim1;
		z__3.r = sgn * sumr.r, z__3.i = sgn * sumr.i;
		z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
		z__1.r = c__[i__2].r - z__2.r, z__1.i = c__[i__2].i - z__2.i;
		vec.r = z__1.r, vec.i = z__1.i;
		scaloc = 1.;
		i__2 = k + k * a_dim1;
		i__3 = l + l * b_dim1;
		z__2.r = sgn * b[i__3].r, z__2.i = sgn * b[i__3].i;
		z__1.r = a[i__2].r + z__2.r, z__1.i = a[i__2].i + z__2.i;
		a11.r = z__1.r, a11.i = z__1.i;
		da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
			d__2));
		if (da11 <= smin) {
		    /* Near-singular pivot: perturb it and flag INFO = 1. */
		    a11.r = smin, a11.i = 0.;
		    da11 = smin;
		    *info = 1;
		}
		db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
			d__2));
		if (da11 < 1. && db > 1.) {
		    if (db > bignum * da11) {
			scaloc = 1. / db;
		    }
		}
		z__3.r = scaloc, z__3.i = 0.;
		z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
			z__3.i + vec.i * z__3.r;
		z__1 = zladiv_(&z__2, &a11);
		x11.r = z__1.r, x11.i = z__1.i;
		if (scaloc != 1.) {
		    /* Rescale all of C to keep the solution representable. */
		    i__2 = *n;
		    for (j = 1; j <= i__2; ++j) {
			zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
/* L10: */
		    }
		    *scale *= scaloc;
		}
		i__2 = k + l * c_dim1;
		c__[i__2].r = x11.r, c__[i__2].i = x11.i;
/* L20: */
	    }
/* L30: */
	}
    } else if (! notrna && notrnb) {

	/* Case 2: solve A**H*X + ISGN*X*B = scale*C.  Columns are swept
	   top-down. */
	i__1 = *n;
	for (l = 1; l <= i__1; ++l) {
	    i__2 = *m;
	    for (k = 1; k <= i__2; ++k) {
		i__3 = k - 1;
		z__1 = zdotc_(&i__3, &a[k * a_dim1 + 1], &c__1, &c__[l *
			c_dim1 + 1], &c__1);
		suml.r = z__1.r, suml.i = z__1.i;
		i__3 = l - 1;
		z__1 = zdotu_(&i__3, &c__[k + c_dim1], ldc, &b[l * b_dim1 + 1]
			, &c__1);
		sumr.r = z__1.r, sumr.i = z__1.i;
		i__3 = k + l * c_dim1;
		z__3.r = sgn * sumr.r, z__3.i = sgn * sumr.i;
		z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
		z__1.r = c__[i__3].r - z__2.r, z__1.i = c__[i__3].i - z__2.i;
		vec.r = z__1.r, vec.i = z__1.i;
		scaloc = 1.;
		d_cnjg(&z__2, &a[k + k * a_dim1]);
		i__3 = l + l * b_dim1;
		z__3.r = sgn * b[i__3].r, z__3.i = sgn * b[i__3].i;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		a11.r = z__1.r, a11.i = z__1.i;
		da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
			d__2));
		if (da11 <= smin) {
		    /* Near-singular pivot: perturb it and flag INFO = 1. */
		    a11.r = smin, a11.i = 0.;
		    da11 = smin;
		    *info = 1;
		}
		db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
			d__2));
		if (da11 < 1. && db > 1.) {
		    if (db > bignum * da11) {
			scaloc = 1. / db;
		    }
		}
		z__3.r = scaloc, z__3.i = 0.;
		z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
			z__3.i + vec.i * z__3.r;
		z__1 = zladiv_(&z__2, &a11);
		x11.r = z__1.r, x11.i = z__1.i;
		if (scaloc != 1.) {
		    /* Rescale all of C to keep the solution representable. */
		    i__3 = *n;
		    for (j = 1; j <= i__3; ++j) {
			zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
/* L40: */
		    }
		    *scale *= scaloc;
		}
		i__3 = k + l * c_dim1;
		c__[i__3].r = x11.r, c__[i__3].i = x11.i;
/* L50: */
	    }
/* L60: */
	}
    } else if (! notrna && ! notrnb) {

	/* Case 3: solve A**H*X + ISGN*X*B**H = scale*C.  Columns are
	   processed right-to-left, each swept top-down. */
	for (l = *n; l >= 1; --l) {
	    i__1 = *m;
	    for (k = 1; k <= i__1; ++k) {
		i__2 = k - 1;
		z__1 = zdotc_(&i__2, &a[k * a_dim1 + 1], &c__1, &c__[l *
			c_dim1 + 1], &c__1);
		suml.r = z__1.r, suml.i = z__1.i;
		i__2 = *n - l;
/* Computing MIN */
		i__3 = l + 1;
/* Computing MIN */
		i__4 = l + 1;
		z__1 = zdotc_(&i__2, &c__[k + min(i__3,*n) * c_dim1], ldc, &b[
			l + min(i__4,*n) * b_dim1], ldb);
		sumr.r = z__1.r, sumr.i = z__1.i;
		i__2 = k + l * c_dim1;
		d_cnjg(&z__4, &sumr);
		z__3.r = sgn * z__4.r, z__3.i = sgn * z__4.i;
		z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
		z__1.r = c__[i__2].r - z__2.r, z__1.i = c__[i__2].i - z__2.i;
		vec.r = z__1.r, vec.i = z__1.i;
		scaloc = 1.;
		i__2 = k + k * a_dim1;
		i__3 = l + l * b_dim1;
		z__3.r = sgn * b[i__3].r, z__3.i = sgn * b[i__3].i;
		z__2.r = a[i__2].r + z__3.r, z__2.i = a[i__2].i + z__3.i;
		d_cnjg(&z__1, &z__2);
		a11.r = z__1.r, a11.i = z__1.i;
		da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
			d__2));
		if (da11 <= smin) {
		    /* Near-singular pivot: perturb it and flag INFO = 1. */
		    a11.r = smin, a11.i = 0.;
		    da11 = smin;
		    *info = 1;
		}
		db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
			d__2));
		if (da11 < 1. && db > 1.) {
		    if (db > bignum * da11) {
			scaloc = 1. / db;
		    }
		}
		z__3.r = scaloc, z__3.i = 0.;
		z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
			z__3.i + vec.i * z__3.r;
		z__1 = zladiv_(&z__2, &a11);
		x11.r = z__1.r, x11.i = z__1.i;
		if (scaloc != 1.) {
		    /* Rescale all of C to keep the solution representable. */
		    i__2 = *n;
		    for (j = 1; j <= i__2; ++j) {
			zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
/* L70: */
		    }
		    *scale *= scaloc;
		}
		i__2 = k + l * c_dim1;
		c__[i__2].r = x11.r, c__[i__2].i = x11.i;
/* L80: */
	    }
/* L90: */
	}
    } else if (notrna && ! notrnb) {

	/* Case 4: solve A*X + ISGN*X*B**H = scale*C.  Columns are
	   processed right-to-left, each swept bottom-up. */
	for (l = *n; l >= 1; --l) {
	    for (k = *m; k >= 1; --k) {
		i__1 = *m - k;
/* Computing MIN */
		i__2 = k + 1;
/* Computing MIN */
		i__3 = k + 1;
		z__1 = zdotu_(&i__1, &a[k + min(i__2,*m) * a_dim1], lda, &c__[
			min(i__3,*m) + l * c_dim1], &c__1);
		suml.r = z__1.r, suml.i = z__1.i;
		i__1 = *n - l;
/* Computing MIN */
		i__2 = l + 1;
/* Computing MIN */
		i__3 = l + 1;
		z__1 = zdotc_(&i__1, &c__[k + min(i__2,*n) * c_dim1], ldc, &b[
			l + min(i__3,*n) * b_dim1], ldb);
		sumr.r = z__1.r, sumr.i = z__1.i;
		i__1 = k + l * c_dim1;
		d_cnjg(&z__4, &sumr);
		z__3.r = sgn * z__4.r, z__3.i = sgn * z__4.i;
		z__2.r = suml.r + z__3.r, z__2.i = suml.i + z__3.i;
		z__1.r = c__[i__1].r - z__2.r, z__1.i = c__[i__1].i - z__2.i;
		vec.r = z__1.r, vec.i = z__1.i;
		scaloc = 1.;
		i__1 = k + k * a_dim1;
		d_cnjg(&z__3, &b[l + l * b_dim1]);
		z__2.r = sgn * z__3.r, z__2.i = sgn * z__3.i;
		z__1.r = a[i__1].r + z__2.r, z__1.i = a[i__1].i + z__2.i;
		a11.r = z__1.r, a11.i = z__1.i;
		da11 = (d__1 = a11.r, abs(d__1)) + (d__2 = d_imag(&a11), abs(
			d__2));
		if (da11 <= smin) {
		    /* Near-singular pivot: perturb it and flag INFO = 1. */
		    a11.r = smin, a11.i = 0.;
		    da11 = smin;
		    *info = 1;
		}
		db = (d__1 = vec.r, abs(d__1)) + (d__2 = d_imag(&vec), abs(
			d__2));
		if (da11 < 1. && db > 1.) {
		    if (db > bignum * da11) {
			scaloc = 1. / db;
		    }
		}
		z__3.r = scaloc, z__3.i = 0.;
		z__2.r = vec.r * z__3.r - vec.i * z__3.i, z__2.i = vec.r *
			z__3.i + vec.i * z__3.r;
		z__1 = zladiv_(&z__2, &a11);
		x11.r = z__1.r, x11.i = z__1.i;
		if (scaloc != 1.) {
		    /* Rescale all of C to keep the solution representable. */
		    i__1 = *n;
		    for (j = 1; j <= i__1; ++j) {
			zdscal_(m, &scaloc, &c__[j * c_dim1 + 1], &c__1);
/* L100: */
		    }
		    *scale *= scaloc;
		}
		i__1 = k + l * c_dim1;
		c__[i__1].r = x11.r, c__[i__1].i = x11.i;
/* L110: */
	    }
/* L120: */
	}
    }
    return;
}
395