      SUBROUTINE DLAGS2( UPPER, A1, A2, A3, B1, B2, B3, CSU, SNU, CSV,
     $                   SNV, CSQ, SNQ )
*
*  -- LAPACK auxiliary routine (version 3.3.1) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.--
*  -- April 2011                                                      --
*
*     .. Scalar Arguments ..
      LOGICAL            UPPER
      DOUBLE PRECISION   A1, A2, A3, B1, B2, B3, CSQ, CSU, CSV, SNQ,
     $                   SNU, SNV
*     ..
*
*  Purpose
*  =======
*
*  DLAGS2 computes 2-by-2 orthogonal matrices U, V and Q, such
*  that if ( UPPER ) then
*
*            U**T *A*Q = U**T *( A1 A2 )*Q = ( x  0  )
*                              ( 0  A3 )     ( x  x  )
*  and
*            V**T *B*Q = V**T *( B1 B2 )*Q = ( x  0  )
*                              ( 0  B3 )     ( x  x  )
*
*  or if ( .NOT.UPPER ) then
*
*            U**T *A*Q = U**T *( A1 0  )*Q = ( x  x  )
*                              ( A2 A3 )     ( 0  x  )
*  and
*            V**T *B*Q = V**T *( B1 0  )*Q = ( x  x  )
*                              ( B2 B3 )     ( 0  x  )
*
*  The rows of the transformed A and B are parallel, where
*
*    U = (  CSU  SNU ), V = (  CSV  SNV ), Q = (  CSQ  SNQ )
*        ( -SNU  CSU )      ( -SNV  CSV )      ( -SNQ  CSQ )
*
*  Z**T denotes the transpose of Z.
*
*
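*  The rotations are obtained from the singular value decomposition of
*  the 2-by-2 triangular matrix C = A*adj(B), computed by DLASV2, and
*  from a Givens rotation, computed by DLARTG, that annihilates the
*  required entry of U**T *A or V**T *B.
*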
*  Arguments
*  =========
*
*  UPPER   (input) LOGICAL
*          = .TRUE.: the input matrices A and B are upper triangular.
*          = .FALSE.: the input matrices A and B are lower triangular.
*
*  A1      (input) DOUBLE PRECISION
*  A2      (input) DOUBLE PRECISION
*  A3      (input) DOUBLE PRECISION
*          On entry, A1, A2 and A3 are elements of the input 2-by-2
*          upper (lower) triangular matrix A.
*
*  B1      (input) DOUBLE PRECISION
*  B2      (input) DOUBLE PRECISION
*  B3      (input) DOUBLE PRECISION
*          On entry, B1, B2 and B3 are elements of the input 2-by-2
*          upper (lower) triangular matrix B.
*
*  CSU     (output) DOUBLE PRECISION
*  SNU     (output) DOUBLE PRECISION
*          The desired orthogonal matrix U.
*
*  CSV     (output) DOUBLE PRECISION
*  SNV     (output) DOUBLE PRECISION
*          The desired orthogonal matrix V.
*
*  CSQ     (output) DOUBLE PRECISION
*  SNQ     (output) DOUBLE PRECISION
*          The desired orthogonal matrix Q.
*
*  =====================================================================
*
*     .. Parameters ..
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
      DOUBLE PRECISION   A, AUA11, AUA12, AUA21, AUA22, AVB11, AVB12,
     $                   AVB21, AVB22, B, C, CSL, CSR, D, R, S1, S2,
     $                   SNL, SNR, UA11, UA11R, UA12, UA21, UA22, UA22R,
     $                   VB11, VB11R, VB12, VB21, VB22, VB22R
*     ..
*     .. External Subroutines ..
      EXTERNAL           DLARTG, DLASV2
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ABS
*     ..
*     .. Executable Statements ..
*
      IF( UPPER ) THEN
*
*        Input matrices A and B are upper triangular matrices
*
*        Form matrix C = A*adj(B) = ( a b )
*                                   ( 0 d )
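*        where adj(B) = ( B3 -B2 ) is the adjugate of B, so that
*                       ( 0   B1 )
*        a = A1*B3, b = A2*B1 - A1*B2 and d = A3*B1.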
*
         A = A1*B3
         D = A3*B1
         B = A2*B1 - A1*B2
*
*        The SVD of real 2-by-2 triangular C
*
*         ( CSL -SNL )*( A B )*(  CSR  SNR ) = ( R 0 )
*         ( SNL  CSL ) ( 0 D ) ( -SNR  CSR )   ( 0 T )
*
         CALL DLASV2( A, B, D, S1, S2, SNR, CSR, SNL, CSL )
*
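*        Depending on the relative sizes of the sines and cosines of
*        the two rotations, annihilate either the (1,2) or the (2,2)
*        entries of U**T *A and V**T *B (the latter choice also swaps
*        the rows of U and V).
*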
         IF( ABS( CSL ).GE.ABS( SNL ) .OR. ABS( CSR ).GE.ABS( SNR ) )
     $       THEN
*
*           Compute the (1,1) and (1,2) elements of U**T *A and V**T *B,
*           and (1,2) element of |U|**T *|A| and |V|**T *|B|.
*
            UA11R = CSL*A1
            UA12 = CSL*A2 + SNL*A3
*
            VB11R = CSR*B1
            VB12 = CSR*B2 + SNR*B3
*
            AUA12 = ABS( CSL )*ABS( A2 ) + ABS( SNL )*ABS( A3 )
            AVB12 = ABS( CSR )*ABS( B2 ) + ABS( SNR )*ABS( B3 )
*
*           zero (1,2) elements of U**T *A and V**T *B
*
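*           The transformed rows of A and B are parallel, so zeroing
*           the (1,2) entry of one nearly zeroes the (1,2) entry of the
*           other; the rotation is taken from the entry with the
*           smaller relative error estimate, AUA12/(|UA11R|+|UA12|)
*           versus AVB12/(|VB11R|+|VB12|).
*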
            IF( ( ABS( UA11R )+ABS( UA12 ) ).NE.ZERO ) THEN
               IF( AUA12 / ( ABS( UA11R )+ABS( UA12 ) ).LE.AVB12 /
     $             ( ABS( VB11R )+ABS( VB12 ) ) ) THEN
                  CALL DLARTG( -UA11R, UA12, CSQ, SNQ, R )
               ELSE
                  CALL DLARTG( -VB11R, VB12, CSQ, SNQ, R )
               END IF
            ELSE
               CALL DLARTG( -VB11R, VB12, CSQ, SNQ, R )
            END IF
*
            CSU = CSL
            SNU = -SNL
            CSV = CSR
            SNV = -SNR
*
         ELSE
*
*           Compute the (2,1) and (2,2) elements of U**T *A and V**T *B,
*           and (2,2) element of |U|**T *|A| and |V|**T *|B|.
*
            UA21 = -SNL*A1
            UA22 = -SNL*A2 + CSL*A3
*
            VB21 = -SNR*B1
            VB22 = -SNR*B2 + CSR*B3
*
            AUA22 = ABS( SNL )*ABS( A2 ) + ABS( CSL )*ABS( A3 )
            AVB22 = ABS( SNR )*ABS( B2 ) + ABS( CSR )*ABS( B3 )
*
*           zero (2,2) elements of U**T*A and V**T*B, and then swap.
*
            IF( ( ABS( UA21 )+ABS( UA22 ) ).NE.ZERO ) THEN
               IF( AUA22 / ( ABS( UA21 )+ABS( UA22 ) ).LE.AVB22 /
     $             ( ABS( VB21 )+ABS( VB22 ) ) ) THEN
                  CALL DLARTG( -UA21, UA22, CSQ, SNQ, R )
               ELSE
                  CALL DLARTG( -VB21, VB22, CSQ, SNQ, R )
               END IF
            ELSE
               CALL DLARTG( -VB21, VB22, CSQ, SNQ, R )
            END IF
*
            CSU = SNL
            SNU = CSL
            CSV = SNR
            SNV = CSR
*
         END IF
*
      ELSE
*
*        Input matrices A and B are lower triangular matrices
*
*        Form matrix C = A*adj(B) = ( a 0 )
*                                   ( c d )
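*        where adj(B) = (  B3  0  ) is the adjugate of B, so that
*                       ( -B2  B1 )
*        a = A1*B3, c = A2*B3 - A3*B2 and d = A3*B1.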
*
         A = A1*B3
         D = A3*B1
         C = A2*B3 - A3*B2
*
*        The SVD of real 2-by-2 triangular C
*
*         ( CSL -SNL )*( A 0 )*(  CSR  SNR ) = ( R 0 )
*         ( SNL  CSL ) ( C D ) ( -SNR  CSR )   ( 0 T )
*
         CALL DLASV2( A, C, D, S1, S2, SNR, CSR, SNL, CSL )
*
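*        As in the upper triangular case, choose the branch according
*        to the relative sizes of the sines and cosines; here either
*        the (2,1) or the (1,1) entries of U**T *A and V**T *B are
*        annihilated.
*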
         IF( ABS( CSR ).GE.ABS( SNR ) .OR. ABS( CSL ).GE.ABS( SNL ) )
     $       THEN
*
*           Compute the (2,1) and (2,2) elements of U**T *A and V**T *B,
*           and (2,1) element of |U|**T *|A| and |V|**T *|B|.
*
            UA21 = -SNR*A1 + CSR*A2
            UA22R = CSR*A3
*
            VB21 = -SNL*B1 + CSL*B2
            VB22R = CSL*B3
*
            AUA21 = ABS( SNR )*ABS( A1 ) + ABS( CSR )*ABS( A2 )
            AVB21 = ABS( SNL )*ABS( B1 ) + ABS( CSL )*ABS( B2 )
*
*           zero (2,1) elements of U**T *A and V**T *B.
*
            IF( ( ABS( UA21 )+ABS( UA22R ) ).NE.ZERO ) THEN
               IF( AUA21 / ( ABS( UA21 )+ABS( UA22R ) ).LE.AVB21 /
     $             ( ABS( VB21 )+ABS( VB22R ) ) ) THEN
                  CALL DLARTG( UA22R, UA21, CSQ, SNQ, R )
               ELSE
                  CALL DLARTG( VB22R, VB21, CSQ, SNQ, R )
               END IF
            ELSE
               CALL DLARTG( VB22R, VB21, CSQ, SNQ, R )
            END IF
*
            CSU = CSR
            SNU = -SNR
            CSV = CSL
            SNV = -SNL
*
         ELSE
*
*           Compute the (1,1) and (1,2) elements of U**T *A and V**T *B,
*           and (1,1) element of |U|**T *|A| and |V|**T *|B|.
*
            UA11 = CSR*A1 + SNR*A2
            UA12 = SNR*A3
*
            VB11 = CSL*B1 + SNL*B2
            VB12 = SNL*B3
*
            AUA11 = ABS( CSR )*ABS( A1 ) + ABS( SNR )*ABS( A2 )
            AVB11 = ABS( CSL )*ABS( B1 ) + ABS( SNL )*ABS( B2 )
*
*           zero (1,1) elements of U**T*A and V**T*B, and then swap.
*
            IF( ( ABS( UA11 )+ABS( UA12 ) ).NE.ZERO ) THEN
               IF( AUA11 / ( ABS( UA11 )+ABS( UA12 ) ).LE.AVB11 /
     $             ( ABS( VB11 )+ABS( VB12 ) ) ) THEN
                  CALL DLARTG( UA12, UA11, CSQ, SNQ, R )
               ELSE
                  CALL DLARTG( VB12, VB11, CSQ, SNQ, R )
               END IF
            ELSE
               CALL DLARTG( VB12, VB11, CSQ, SNQ, R )
            END IF
*
            CSU = SNR
            SNU = CSR
            CSV = SNL
            SNV = CSL
*
         END IF
*
      END IF
*
      RETURN
*
*     End of DLAGS2
*
      END