/*
 *   Copyright (c) 2011, Michael Lehn
 *
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   1) Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2) Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *   3) Neither the name of the FLENS development group nor the names of
 *      its contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Based on
      SUBROUTINE DLAHQR( WANTT, WANTZ, N, ILO, IHI, H, LDH, WR, WI,
     $                   ILOZ, IHIZ, Z, LDZ, INFO )
      SUBROUTINE ZLAHQR( WANTT, WANTZ, N, ILO, IHI, H, LDH, W, ILOZ,
     $                   IHIZ, Z, LDZ, INFO )
 *
 *  -- LAPACK auxiliary routine (version 3.2) --
 *     Univ. of Tennessee, Univ. of California Berkeley,
 *     Univ. of Colorado Denver and NAG Ltd..
 *     November 2006
 */
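
/* Usage sketch (illustrative only, not part of the original source):
 *
 *   flens::lapack::lahqr (defined below) computes the eigenvalues of the
 *   upper Hessenberg submatrix in rows and columns iLo to iHi of H and, if
 *   wantT/wantZ are set, updates the Schur form and the accumulated
 *   transformation matrix Z.  Assuming the usual FLENS storage typedefs, a
 *   standalone call of the real variant could look like this:
 *
 *       typedef flens::GeMatrix<flens::FullStorage<double> >    RealGeMatrix;
 *       typedef flens::DenseVector<flens::Array<double> >       RealDenseVector;
 *
 *       // H is an n x n upper Hessenberg matrix, Z is set to the identity.
 *       const int        n = H.numRows();
 *       RealDenseVector  wr(n), wi(n);
 *       int info = flens::lapack::lahqr(true, true, 1, n,
 *                                       H, wr, wi, 1, n, Z);
 *       // info==0: eigenvalues are in wr (real parts) and wi (imaginary
 *       // parts); info>0: the QR iteration failed to converge.
 *
 *   The complex variant takes a single complex vector w instead of wr/wi.
 */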

#ifndef FLENS_LAPACK_LA_LAHQR_TCC
#define FLENS_LAPACK_LA_LAHQR_TCC 1

#include <flens/blas/blas.h>
#include <flens/lapack/lapack.h>

namespace flens { namespace lapack {

//== generic lapack implementation =============================================

namespace generic {

//
//  Real variant
//
template <typename IndexType, typename MH, typename VWR, typename VWI,
          typename MZ>
IndexType
lahqr_impl(bool                  wantT,
           bool                  wantZ,
           IndexType             iLo,
           IndexType             iHi,
           GeMatrix<MH>          &H,
           DenseVector<VWR>      &wr,
           DenseVector<VWI>      &wi,
           IndexType             iLoZ,
           IndexType             iHiZ,
           GeMatrix<MZ>          &Z)
{
    using std::abs;
    using std::max;
    using std::min;
    using std::sqrt;

    typedef typename GeMatrix<MH>::ElementType  T;

    const Underscore<IndexType>     _;
    const T                         Zero(0), One(1), Two(2);
    const T                         Dat1 = T(3)/T(4),
                                    Dat2 = T(-0.4375);
    const IndexType                 itMax = 30;
    const IndexType                 n = H.numRows();

    typedef typename GeMatrix<MH>::VectorView   VectorView;
    T           vBuffer[3];
    VectorView  v = typename VectorView::Engine(3, vBuffer);
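//
//  (v is a view of the stack buffer vBuffer above; it holds the order-2 or
//  order-3 Householder reflector used in the double-shift QR step below.)
//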

//
//  Quick return if possible
//
    if (n==0) {
        return 0;
    }
    if (iLo==iHi) {
        wr(iLo) = H(iLo, iLo);
        wi(iLo) = Zero;
        return 0;
    }
//
//  ==== clear out the trash ====
//
    for (IndexType j=iLo; j<=iHi-3; ++j) {
        H(j+2, j) = Zero;
        H(j+3, j) = Zero;
    }
    if (iLo<=iHi-2) {
        H(iHi, iHi-2) = Zero;
    }

    const IndexType nh = iHi - iLo + 1;
//
//  Set machine-dependent constants for the stopping criterion.
//
    T safeMin = lamch<T>(SafeMin);
    T safeMax = One / safeMin;
    labad(safeMin, safeMax);
    const T ulp = lamch<T>(Precision);
    const T smallNum = safeMin*(T(nh)/ulp);
//
//  i1 and i2 are the indices of the first row and last column of H
//  to which transformations must be applied. If eigenvalues only are
//  being computed, i1 and i2 are set inside the main loop.
//
    IndexType i1 = -1, i2 = -1;
    if (wantT) {
        i1 = 1;
        i2 = n;
    }
//
//  The main loop begins here. Variable i is the loop index and decreases from
//  iHi to iLo in steps of 1 or 2. Each iteration of the loop works
//  with the active submatrix in rows and columns l to i.
//  Eigenvalues i+1 to iHi have already converged. Either l = iLo or
//  H(l,l-1) is negligible so that the matrix splits.
//
    IndexType i = iHi;
    while (true) {
        IndexType l = iLo;
        if (i<iLo) {
            break;
        }
//
//      Perform QR iterations on rows and columns iLo to i until a
//      submatrix of order 1 or 2 splits off at the bottom because a
//      subdiagonal element has become negligible.
//
        IndexType its;
        for (its=0; its<=itMax; ++its) {
//
//          Look for a single small subdiagonal element.
//
            IndexType k;
            for (k=i; k>=l+1; --k) {
                if (abs(H(k,k-1))<=smallNum) {
                    break;
                }
                T test = abs(H(k-1,k-1)) + abs(H(k,k));
                if (test==Zero) {
                    if (k-2>=iLo) {
                        test += abs(H(k-1,k-2));
                    }
                    if (k+1<=iHi) {
                        test += abs(H(k+1,k));
                    }
                }
//              ==== The following is a conservative small subdiagonal
//              .    deflation  criterion due to Ahues & Tisseur (LAWN 122,
//              .    1997). It has better mathematical foundation and
//              .    improves accuracy in some cases.  ====
                if (abs(H(k,k-1))<=ulp*test) {
                    const T ab = max(abs(H(k, k-1)), abs(H(k-1, k)));
                    const T ba = min(abs(H(k, k-1)), abs(H(k-1, k)));
                    const T aa = max(abs(H(k,   k)), abs(H(k-1, k-1)-H(k, k)));
                    const T bb = min(abs(H(k,   k)), abs(H(k-1, k-1)-H(k, k)));
                    const T s = aa + ab;
                    if (ba*(ab/s)<=max(smallNum, ulp*(bb*(aa/s)))) {
                        break;
                    }
                }
            }
            l = k;

            if (l>iLo) {
//
//              H(l,l-1) is negligible
//
                H(l, l-1) = Zero;
            }
//
//          Exit from loop if a submatrix of order 1 or 2 has split off.
//
            if (l>=i-1) {
                break;
            }
//
//          Now the active submatrix is in rows and columns l to i. If
//          eigenvalues only are being computed, only the active submatrix
//          need be transformed.
//
            if (!wantT) {
                i1 = l;
                i2 = i;
            }

            T H11, H12, H21, H22;
            T rt1r, rt2r, rt1i, rt2i;
            if (its==10) {
//
//              Exceptional shift.
//
                const T s = abs(H(l+1,l)) + abs(H(l+2, l+1));
                H11 = Dat1*s + H(l,l);
                H12 = Dat2*s;
                H21 = s;
                H22 = H11;
            } else if (its==20) {
//
//              Exceptional shift.
//
                const T s = abs(H(i,i-1)) + abs(H(i-1,i-2));
                H11 = Dat1*s + H(i,i);
                H12 = Dat2*s;
                H21 = s;
                H22 = H11;
            } else {
//
//              Prepare to use Francis' double shift
//              (i.e. 2nd degree generalized Rayleigh quotient)
//
                H11 = H(i-1, i-1);
                H21 = H(  i, i-1);
                H12 = H(i-1,   i);
                H22 = H(  i,   i);
            }

            const T s = abs(H11) + abs(H12) + abs(H21) + abs(H22);
            if (s==Zero) {
                rt1r = Zero;
                rt1i = Zero;
                rt2r = Zero;
                rt2i = Zero;
            } else {
                H11 /= s;
                H21 /= s;
                H12 /= s;
                H22 /= s;
                const T tr = (H11+H22) / Two;
                const T det = (H11-tr)*(H22-tr) - H12*H21;
                const T rtDisc = sqrt(abs(det));
                if (det>=Zero) {
//
//                  ==== complex conjugate shifts ====
//
                    rt1r = tr*s;
                    rt2r = rt1r;
                    rt1i = rtDisc*s;
                    rt2i = -rt1i;
                } else {
//
//                  ==== real shifts (use only one of them)  ====
//
                    rt1r = tr + rtDisc;
                    rt2r = tr - rtDisc;
                    if (abs(rt1r-H22)<=abs(rt2r-H22)) {
                        rt1r *= s;
                        rt2r = rt1r;
                    } else {
                        rt2r *= s;
                        rt1r = rt2r;
                    }
                    rt1i = Zero;
                    rt2i = Zero;
                }
            }
//
//          Look for two consecutive small subdiagonal elements.
//
            IndexType m;
            for (m=i-2; m>=l; --m) {
//              Determine the effect of starting the double-shift QR
//              iteration at row m, and see if this would make H(m,m-1)
//              negligible.  (The following uses scaling to avoid
//              overflows and most underflows.)
//
                T H21S = H(m+1,m);
                T s = abs(H(m,m)-rt2r) + abs(rt2i) + abs(H21S);

                H21S = H(m+1,m)/s;
                v(1) = H21S*H(m,m+1)
                     + (H(m,m)-rt1r)*((H(m,m)-rt2r)/s)
                     - rt1i*(rt2i/s);
                v(2) = H21S*(H(m,m) + H(m+1,m+1)-rt1r-rt2r);
                v(3) = H21S*H(m+2,m+1);

                s = abs(v(1)) + abs(v(2)) + abs(v(3));
                v(1) /= s;
                v(2) /= s;
                v(3) /= s;
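//
//              (Up to scaling, v is the first column of (H-rt1*I)*(H-rt2*I)
//              restricted to rows m..m+2, i.e. the trial bulge vector for a
//              double-shift step started at row m.)
//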

                if (m==l) {
                    break;
                }

                const T value1 = abs(H(m,m-1))*(abs(v(2))+abs(v(3)));
                const T value2 = ulp*abs(v(1))
                                 *(abs(H(m-1,m-1))+abs(H(m,m))+abs(H(m+1,m+1)));
                if (value1<=value2) {
                    break;
                }
            }
//
//          Double-shift QR step
//
            for (k=m; k<=i-1; ++k) {
//
//              The first iteration of this loop determines a reflection G
//              from the vector v and applies it from left and right to H,
//              thus creating a nonzero bulge below the subdiagonal.
//
//              Each subsequent iteration determines a reflection G to
//              restore the Hessenberg form in the (k-1)th column, and thus
//              chases the bulge one step toward the bottom of the active
//              submatrix. 'nr' is the order of G.
//
                T   t1, t2, t3, v2, v3;

                IndexType nr = min(IndexType(3), i-k+1);
                if (k>m) {
                    v(_(1,nr)) = H(_(k,k+nr-1),k-1);
                }
                larfg(nr, v(1), v(_(2,nr)), t1);
                if (k>m) {
                    H(k,  k-1) = v(1);
                    H(k+1,k-1) = Zero;
                    if (k<i-1) {
                        H(k+2,k-1) = Zero;
                    }
                } else if (m>l) {
//                  ==== Use the following instead of
//                  .    H( K, K-1 ) = -H( K, K-1 ) to
//                  .    avoid a bug when v(2) and v(3)
//                  .    underflow. ====
                    H(k, k-1) *= (One-t1);
                }
                v2 = v(2);
                t2 = t1*v2;
                if (nr==3) {
                    v3 = v(3);
                    t3 = t1*v3;
//
//                  Apply G from the left to transform the rows of the matrix
//                  in columns k to i2.
//
                    for (IndexType j=k; j<=i2; ++j) {
                        const T sum = H(k,j) + v2*H(k+1,j) + v3*H(k+2,j);
                        H(k,   j) -= sum*t1;
                        H(k+1, j) -= sum*t2;
                        H(k+2, j) -= sum*t3;
                    }
//
//                  Apply G from the right to transform the columns of the
//                  matrix in rows i1 to min(k+3,i).
//
                    for (IndexType j=i1; j<=min(k+3,i); ++j) {
                        const T sum = H(j, k) + v2*H(j,k+1) + v3*H(j,k+2);
                        H(j, k  ) -= sum*t1;
                        H(j, k+1) -= sum*t2;
                        H(j, k+2) -= sum*t3;
                    }

                    if (wantZ) {
//
//                      Accumulate transformations in the matrix Z
//
                        for (IndexType j=iLoZ; j<=iHiZ; ++j) {
                            const T sum = Z(j, k) + v2*Z(j, k+1) + v3*Z(j, k+2);
                            Z(j, k  ) -= sum*t1;
                            Z(j, k+1) -= sum*t2;
                            Z(j, k+2) -= sum*t3;
                        }
                    }
                } else if (nr==2) {
//
//                  Apply G from the left to transform the rows of the matrix
//                  in columns K to I2.
//
                    for (IndexType j=k; j<=i2; ++j) {
                        const T sum = H(k, j) + v2*H(k+1, j);
                        H(k,   j) -= sum*t1;
                        H(k+1, j) -= sum*t2;
                    }
//
//                  Apply G from the right to transform the columns of the
//                  matrix in rows i1 to min(k+3,i).
//
                    for (IndexType j=i1; j<=i; ++j) {
                        const T sum = H(j, k) + v2*H(j, k+1);
                        H(j, k  ) -= sum*t1;
                        H(j, k+1) -= sum*t2;
                    }

                    if (wantZ) {
//
//                      Accumulate transformations in the matrix Z
//
                        for (IndexType j=iLoZ; j<=iHiZ; ++j) {
                            const T sum = Z(j, k) + v2*Z(j, k+1);
                            Z(j, k  ) -= sum*t1;
                            Z(j, k+1) -= sum*t2;
                        }
                    }
                }
            }
        }
//
//      Failure to converge in remaining number of iterations
//
        if (its>itMax) {
            return i;
        }

        if (l==i) {
//
//          H(I,I-1) is negligible: one eigenvalue has converged.
//
            wr(i) = H(i, i);
            wi(i) = Zero;
        } else if (l==i-1) {
//
//          H(I-1,I-2) is negligible: a pair of eigenvalues have converged.
//
//          Transform the 2-by-2 submatrix to standard Schur form,
//          and compute and store the eigenvalues.
//
            T cs, sn;
            lanv2(H(i-1,i-1), H(i-1,i), H(i,i-1), H(i,i),
                  wr(i-1), wi(i-1), wr(i), wi(i),
                  cs, sn);

            if (wantT) {
//
//              Apply the transformation to the rest of H.
//
                if (i2>i) {
                    const auto cols = _(i+1,i2);
                    blas::rot(H(i-1,cols), H(i,cols), cs, sn);
                }
                const auto rows = _(i1,i-2);
                blas::rot(H(rows, i-1), H(rows, i), cs, sn);
            }
            if (wantZ) {
//
//              Apply the transformation to Z.
//
                const auto rows = _(iLoZ, iHiZ);
                blas::rot(Z(rows, i-1), Z(rows, i), cs, sn);
            }
        }
//
//      return to start of the main loop with new value of I.
//
        i = l - 1;
    }
    return 0;
}

//
//  Complex variant
//
template <typename IndexType, typename MH, typename VW, typename MZ>
IndexType
lahqr_impl(bool                  wantT,
           bool                  wantZ,
           IndexType             iLo,
           IndexType             iHi,
           GeMatrix<MH>          &H,
           DenseVector<VW>       &w,
           IndexType             iLoZ,
           IndexType             iHiZ,
           GeMatrix<MZ>          &Z)
{
    using std::abs;
    using std::conj;
    using std::imag;
    using std::max;
    using std::min;
    using std::real;

    typedef typename GeMatrix<MH>::ElementType          T;
    typedef typename ComplexTrait<T>::PrimitiveType     PT;

    const Underscore<IndexType>     _;
    const T                         Zero(0), One(1);
    const PT                        RZero(0), ROne(1), RHalf(0.5);

    const PT                        Dat1 = PT(3)/PT(4);
    const IndexType                 itMax = 30;
    const IndexType                 n = H.numRows();


    typedef typename GeMatrix<MH>::VectorView           VectorView;

    T           vBuffer[2];
    VectorView  v = typename VectorView::Engine(2, vBuffer);
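//
//  (v is a view of the stack buffer vBuffer above; it holds the order-2
//  Householder reflector used in the single-shift QR step below.)
//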

//
//  Quick return if possible
//
    if (n==0) {
        return 0;
    }
    if (iLo==iHi) {
        w(iLo) = H(iLo, iLo);
        return 0;
    }
//
//  ==== clear out the trash ====
//
    for (IndexType j=iLo; j<=iHi-3; ++j) {
        H(j+2, j) = Zero;
        H(j+3, j) = Zero;
    }
    if (iLo<=iHi-2) {
        H(iHi, iHi-2) = Zero;
    }

//
//  ==== ensure that subdiagonal entries are real ====
//
    const IndexType jLo = (wantT) ? 1 : iLo;
    const IndexType jHi = (wantT) ? n : iHi;

    for (IndexType i=iLo+1; i<=iHi; ++i) {
        if (imag(H(i,i-1))!=RZero) {
//          ==== The following redundant normalization
//          .    avoids problems with both gradual and
//          .    sudden underflow in ABS(H(I,I-1)) ====
            T scale = H(i,i-1) / abs1(H(i,i-1));
            scale = conj(scale) / abs(scale);
            H(i,i-1) = abs(H(i,i-1));
            H(i, _(i,jHi))            *= scale;
            H(_(jLo,min(jHi,i+1)), i) *= conj(scale);

            if (wantZ) {
                Z(_(iLoZ,iHiZ),i) *= conj(scale);
            }
        }
    }

    const IndexType nh = iHi - iLo + 1;
//
//  Set machine-dependent constants for the stopping criterion.
//
    PT safeMin = lamch<PT>(SafeMin);
    PT safeMax = ROne / safeMin;
    labad(safeMin, safeMax);
    const PT ulp      = lamch<PT>(Precision);
    const PT smallNum = safeMin*(PT(nh)/ulp);
//
//  i1 and i2 are the indices of the first row and last column of H
//  to which transformations must be applied. If eigenvalues only are
//  being computed, i1 and i2 are set inside the main loop.
//
    IndexType i1 = -1, i2 = -1;
    if (wantT) {
        i1 = 1;
        i2 = n;
    }
//
//  The main loop begins here. Variable i is the loop index and decreases from
//  iHi to iLo in steps of 1 or 2. Each iteration of the loop works
//  with the active submatrix in rows and columns l to i.
//  Eigenvalues i+1 to iHi have already converged. Either l = iLo or
//  H(l,l-1) is negligible so that the matrix splits.
//
    IndexType i = iHi;
    while (true) {
        if (i<iLo) {
            break;
        }
//
//      Perform QR iterations on rows and columns iLo to i until a
//      submatrix of order 1 or 2 splits off at the bottom because a
//      subdiagonal element has become negligible.
//
        IndexType l = iLo;
        IndexType its;
        for (its=0; its<=itMax; ++its) {
//
//          Look for a single small subdiagonal element.
//
            IndexType k;
            for (k=i; k>=l+1; --k) {
                if (abs1(H(k,k-1))<=smallNum) {
                    break;
                }
                PT test = abs1(H(k-1,k-1)) + abs1(H(k,k));
                if (test==RZero) {
                    if (k-2>=iLo) {
                        test += abs(real(H(k-1,k-2)));
                    }
                    if (k+1<=iHi) {
                        test += abs(real(H(k+1,k)));
                    }
                }
//              ==== The following is a conservative small subdiagonal
//              .    deflation  criterion due to Ahues & Tisseur (LAWN 122,
//              .    1997). It has better mathematical foundation and
//              .    improves accuracy in some cases.  ====
                if (abs(real(H(k,k-1)))<=ulp*test) {
                    const PT ab = max(abs1(H(k, k-1)),
                                      abs1(H(k-1, k)));
                    const PT ba = min(abs1(H(k, k-1)),
                                      abs1(H(k-1, k)));
                    const PT aa = max(abs1(H(k, k)),
                                      abs1(H(k-1, k-1)-H(k, k)));
                    const PT bb = min(abs1(H(k, k)),
                                      abs1(H(k-1, k-1)-H(k, k)));
                    const PT s = aa + ab;
                    if (ba*(ab/s)<=max(smallNum, ulp*(bb*(aa/s)))) {
                        break;
                    }
                }
            }
            l = k;

            if (l>iLo) {
//
//              H(l,l-1) is negligible
//
                H(l, l-1) = Zero;
            }
//
//          Exit from loop if a submatrix of order 1 or 2 has split off.
//
            if (l>=i) {
                break;
            }
//
//          Now the active submatrix is in rows and columns l to i. If
//          eigenvalues only are being computed, only the active submatrix
//          need be transformed.
//
            if (!wantT) {
                i1 = l;
                i2 = i;
            }

            T   H11, H11S, H22, t, u, x, y;
            PT  H10, H21, s, sx;
            if (its==10) {
//
//              Exceptional shift.
//
                s = Dat1*abs(real(H(l+1,l)));
                t = s + H(l,l);
            } else if (its==20) {
//
//              Exceptional shift.
//
                s = Dat1*abs(real(H(i,i-1)));
                t = s + H(i,i);
            } else {
//
//              Wilkinson's shift: use the eigenvalue of the trailing
//              2-by-2 block that is closer to H(i,i).
//
                t = H(i,i);
                u = sqrt(H(i-1,i)) * sqrt(H(i,i-1));
                s = abs1(u);

                if (s != RZero) {
                    x  = RHalf * (H(i-1,i-1) - t);
                    sx = abs1(x);
                    s  = max(s, abs1(x));
                    y  = s * sqrt(pow(x/s,2) + pow(u/s,2));
                    if (sx > RZero) {
                        PT tmp = real(x/sx)*real(y)
                               + imag(x/sx)*imag(y);

                        if (tmp<RZero) {
                            y = -y;
                        }
                    }
                    t -= u*ladiv(u, x+y);
                }
            }
//
//          Look for two consecutive small subdiagonal elements.
//
            IndexType m;
            for (m=i-1; m>=l+1; --m) {
//
//          Determine the effect of starting the single-shift QR
//          iteration at row M, and see if this would make H(M,M-1)
//          negligible.
//
                H11 = H(m, m);
                H22 = H(m+1, m+1);
                H11S = H11 - t;
                H21 = real(H(m+1,m));
                s = abs1(H11S) + abs(H21);
                H11S /= s;
                H21  /= s;
                v(1) = H11S;
                v(2) = H21;
                H10 = real(H(m,m-1));
                if (abs(H10)*abs(H21)<=ulp*(abs1(H11S)*(abs1(H11)+abs1(H22)))) {
                    goto SINGLE_SHIFT;
                }
            }
            H11  = H(l, l);
            H22  = H(l+1, l+1);
            H11S = H11 - t;
            H21  = real(H(l+1,l));
            s    = abs1(H11S) + abs(H21);
            H11S /= s;
            H21  /= s;
            v(1) = H11S;
            v(2) = H21;
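//
//          (In either case v is, up to scaling, the first column of H - t*I
//          restricted to rows m..m+1; its second entry is real because the
//          subdiagonal of H was normalized to be real above.)
//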

        SINGLE_SHIFT:
//
//          Single-shift QR step
//
            for (k=m; k<=i-1; ++k) {
//
//              The first iteration of this loop determines a reflection G
//              from the vector V and applies it from left and right to H,
//              thus creating a nonzero bulge below the subdiagonal.
//
//              Each subsequent iteration determines a reflection G to
//              restore the Hessenberg form in the (K-1)th column, and thus
//              chases the bulge one step toward the bottom of the active
//              submatrix.
//
//              V(2) is always real before the call to ZLARFG, and hence
//              after the call T2 ( = T1*V(2) ) is also real.
//
                T  t1, v2;
                PT t2;

                if (k>m) {
                    v = H(_(k,k+1),k-1);
                }
                larfg(2, v(1), v(_(2,2)), t1);
                if (k>m) {
                    H(k,  k-1) = v(1);
                    H(k+1,k-1) = Zero;
                }
                v2 = v(2);
                t2 = real(t1*v2);
//
//              Apply G from the left to transform the rows of the matrix
//              in columns K to I2.
//
                for (IndexType j=k; j<=i2; ++j) {
                    const T sum = conj(t1)*H(k,j) + t2*H(k+1,j);
                    H(k,  j) -= sum;
                    H(k+1,j) -= sum*v2;
                }
//
//              Apply G from the right to transform the columns of the
//              matrix in rows I1 to min(K+2,I).
//
                for (IndexType j=i1; j<=min(k+2,i); ++j) {
                    T sum = t1*H(j,k) + t2*H(j,k+1);
                    H(j,k)   -= sum;
                    H(j,k+1) -= sum*conj(v2);
                }

                if (wantZ) {
//
//                  Accumulate transformations in the matrix Z
//
                    for (IndexType j=iLoZ; j<=iHiZ; ++j) {
                        T sum = t1*Z(j,k) + t2*Z(j,k+1);
                        Z(j,k)   -= sum;
                        Z(j,k+1) -= sum*conj(v2);
                    }
                }

                if (k==m && m>l) {
//
//                  If the QR step was started at row M > L because two
//                  consecutive small subdiagonals were found, then extra
//                  scaling must be performed to ensure that H(M,M-1) remains
//                  real.
//
                    T tmp = One - t1;
                    tmp /= abs(tmp);
                    H(m+1,m) *= conj(tmp);
                    if (m+2 <= i) {
                        H(m+2,m+1) *= tmp;
                    }
                    for (IndexType j=m; j<=i; ++j) {
                        if (j != m+1) {
                            if (i2>j) {
                                H(j,_(j+1,i2)) *= tmp;
                            }
                            H(_(i1,j-1),j) *= conj(tmp);
                            if (wantZ) {
                                Z(_(iLoZ,iHiZ),j) *= conj(tmp);
                            }
                        }
                    }
                }
            }

//
//          Ensure that H(I,I-1) is real.
//
            T tmp = H(i,i-1);
            if (imag(tmp) != RZero) {
                PT rTmp = abs(tmp);
                H(i,i-1) = rTmp;
                tmp /= rTmp;
                if (i2>i) {
                    H(i,_(i+1,i2)) *= conj(tmp);
                }
                H(_(i1,i-1),i) *= tmp;
                if (wantZ) {
                    Z(_(iLoZ,iHiZ),i) *= tmp;
                }
            }
        }
//
//      Failure to converge in remaining number of iterations
//
        if (its>itMax) {
            return i;
        }

//
//      H(I,I-1) is negligible: one eigenvalue has converged.
//
        w(i) = H(i,i);
//
//      return to start of the main loop with new value of I.
//
        i = l - 1;
    }
    return 0;
}

} // namespace generic

//== interface for native lapack ===============================================

#ifdef USE_CXXLAPACK

namespace external {

//
//  Real variant
//
template <typename IndexType, typename MH, typename VWR, typename VWI,
          typename MZ>
IndexType
lahqr_impl(bool                   wantT,
           bool                   wantZ,
           IndexType              iLo,
           IndexType              iHi,
           GeMatrix<MH>           &H,
           DenseVector<VWR>       &wr,
           DenseVector<VWI>       &wi,
           IndexType              iLoZ,
           IndexType              iHiZ,
           GeMatrix<MZ>           &Z)
{
    IndexType  info;
    info = cxxlapack::lahqr<IndexType>(wantT,
                                       wantZ,
                                       H.numRows(),
                                       iLo,
                                       iHi,
                                       H.data(),
                                       H.leadingDimension(),
                                       wr.data(),
                                       wi.data(),
                                       iLoZ,
                                       iHiZ,
                                       Z.data(),
                                       Z.leadingDimension());
    return info;
}

//
//  Complex variant
//
template <typename IndexType, typename MH, typename VW, typename MZ>
IndexType
lahqr_impl(bool                   wantT,
           bool                   wantZ,
           IndexType              iLo,
           IndexType              iHi,
           GeMatrix<MH>           &H,
           DenseVector<VW>        &w,
           IndexType              iLoZ,
           IndexType              iHiZ,
           GeMatrix<MZ>           &Z)
{
    IndexType  info;
    info = cxxlapack::lahqr<IndexType>(wantT,
                                       wantZ,
                                       H.numRows(),
                                       iLo,
                                       iHi,
                                       H.data(),
                                       H.leadingDimension(),
                                       w.data(),
                                       iLoZ,
                                       iHiZ,
                                       Z.data(),
                                       Z.leadingDimension());
    return info;
}

} // namespace external

#endif // USE_CXXLAPACK

//== public interface ==========================================================
//
//  Real variant
//
template <typename IndexType, typename MH, typename VWR, typename VWI,
          typename MZ>
typename RestrictTo<IsRealGeMatrix<MH>::value
                 && IsRealDenseVector<VWR>::value
                 && IsRealDenseVector<VWI>::value
                 && IsRealGeMatrix<MZ>::value,
         IndexType>::Type
lahqr(bool              wantT,
      bool              wantZ,
      IndexType         iLo,
      IndexType         iHi,
      MH                &&H,
      VWR               &&wr,
      VWI               &&wi,
      IndexType         iLoZ,
      IndexType         iHiZ,
      MZ                &&Z)
{
    LAPACK_DEBUG_OUT("lahqr");

//
//  Remove references from rvalue types
//
#   ifdef CHECK_CXXLAPACK
    typedef typename RemoveRef<MH>::Type        MatrixH;
    typedef typename RemoveRef<VWR>::Type       VectorWR;
    typedef typename RemoveRef<VWI>::Type       VectorWI;
    typedef typename RemoveRef<MZ>::Type        MatrixZ;
#   endif

//
//  Test the input parameters
//
    using std::max;

    ASSERT(H.firstRow()==1);
    ASSERT(H.firstCol()==1);
    ASSERT(H.numRows()==H.numCols());
    ASSERT(wr.firstIndex()==1);
    ASSERT(wr.length()==H.numRows());
    ASSERT(wi.firstIndex()==1);
    ASSERT(wi.length()==H.numRows());
    ASSERT(!wantZ || (Z.numRows()==H.numCols()));
    ASSERT(!wantZ || (Z.numCols()==H.numCols()));

    // 1 <= ILO <= max(1,IHI); IHI <= N.
    ASSERT(1<=iLo);
    ASSERT(iLo<=max(IndexType(1), iHi));
    ASSERT(iHi<=H.numRows());

    // 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
    ASSERT(1<=iLoZ);
    ASSERT(iLoZ<=iLo);
    ASSERT(iHi<=iHiZ);
    ASSERT(iHiZ<=H.numRows());

//
//  Make copies of output arguments
//
#   ifdef CHECK_CXXLAPACK
    typename MatrixH::NoView        H_org    = H;
    typename MatrixZ::NoView        Z_org    = Z;

    typename MatrixH::NoView        H_       = H;
    typename VectorWR::NoView       wr_      = wr;
    typename VectorWI::NoView       wi_      = wi;
    typename MatrixZ::NoView        Z_       = Z;
#   endif

//
//  Call implementation
//
    IndexType info = LAPACK_SELECT::lahqr_impl(wantT, wantZ, iLo,  iHi,
                                               H, wr, wi, iLoZ, iHiZ, Z);

//
//  Compare results
//
#   ifdef CHECK_CXXLAPACK
    IndexType info_ = external::lahqr_impl(wantT, wantZ, iLo,  iHi,
                                           H_, wr_, wi_, iLoZ, iHiZ, Z_);

    bool failed = false;
    if (! isIdentical(H, H_, " H", "H_")) {
        std::cerr << "CXXLAPACK:  H = " << H << std::endl;
        std::cerr << "F77LAPACK: H_ = " << H_ << std::endl;
        failed = true;
    }

    if (! isIdentical(wr, wr_, " wr", "wr_")) {
        std::cerr << "CXXLAPACK:  wr = " << wr << std::endl;
        std::cerr << "F77LAPACK: wr_ = " << wr_ << std::endl;
        failed = true;
    }

    if (! isIdentical(wi, wi_, " wi", "wi_")) {
        std::cerr << "CXXLAPACK:  wi = " << wi << std::endl;
        std::cerr << "F77LAPACK: wi_ = " << wi_ << std::endl;
        failed = true;
    }

    if (! isIdentical(Z, Z_, " Z", "Z_")) {
        std::cerr << "CXXLAPACK:  Z = " << Z << std::endl;
        std::cerr << "F77LAPACK: Z_ = " << Z_ << std::endl;
        failed = true;
    }

    if (! isIdentical(info, info_, " info", "info_")) {
        std::cerr << "CXXLAPACK:  info = " << info << std::endl;
        std::cerr << "F77LAPACK: info_ = " << info_ << std::endl;
        failed = true;
    }

    if (failed) {
        std::cerr << "H_org = " << H_org << std::endl;
        std::cerr << "Z_org = " << Z_org << std::endl;
        std::cerr << "wantT = " << wantT
                  << ", wantZ = " << wantZ
                  << ", iLo = " << iLo
                  << ", iHi = " << iHi
                  << std::endl;
        std::cerr << "error in: lahqr.tcc" << std::endl;
        ASSERT(0);
    } else {
//        std::cerr << "passed: lahqr.tcc" << std::endl;
    }
#   endif
    return info;
}

//
//  Complex variant
//
template <typename IndexType, typename MH, typename VW, typename MZ>
typename RestrictTo<IsComplexGeMatrix<MH>::value
                 && IsComplexDenseVector<VW>::value
                 && IsComplexGeMatrix<MZ>::value,
         IndexType>::Type
lahqr(bool              wantT,
      bool              wantZ,
      IndexType         iLo,
      IndexType         iHi,
      MH                &&H,
      VW                &&w,
      IndexType         iLoZ,
      IndexType         iHiZ,
      MZ                &&Z)
{
    LAPACK_DEBUG_OUT("lahqr (complex)");

//
//  Remove references from rvalue types
//
#   ifdef CHECK_CXXLAPACK
    typedef typename RemoveRef<MH>::Type        MatrixH;
    typedef typename RemoveRef<VW>::Type        VectorW;
    typedef typename RemoveRef<MZ>::Type        MatrixZ;
#   endif

//
//  Test the input parameters
//
    using std::max;

    ASSERT(H.firstRow()==1);
    ASSERT(H.firstCol()==1);
    ASSERT(H.numRows()==H.numCols());
    ASSERT(w.firstIndex()==1);
    ASSERT(w.length()==H.numRows());
    ASSERT(!wantZ || (Z.numRows()==H.numCols()));
    ASSERT(!wantZ || (Z.numCols()==H.numCols()));

    // 1 <= ILO <= max(1,IHI); IHI <= N.
    ASSERT(1<=iLo);
    ASSERT(iLo<=max(IndexType(1), iHi));
    ASSERT(iHi<=H.numRows());

    // 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
    ASSERT(1<=iLoZ);
    ASSERT(iLoZ<=iLo);
    ASSERT(iHi<=iHiZ);
    ASSERT(iHiZ<=H.numRows());

//
//  Make copies of output arguments
//
#   ifdef CHECK_CXXLAPACK
    typename MatrixH::NoView        H_org    = H;
    typename MatrixZ::NoView        Z_org    = Z;

    typename MatrixH::NoView        H_       = H;
    typename VectorW::NoView        w_       = w;
    typename MatrixZ::NoView        Z_       = Z;
#   endif

//
//  Call implementation
//
    IndexType info = LAPACK_SELECT::lahqr_impl(wantT, wantZ, iLo,  iHi,
                                               H, w, iLoZ, iHiZ, Z);

//
//  Compare results
//
#   ifdef CHECK_CXXLAPACK
    IndexType info_ = external::lahqr_impl(wantT, wantZ, iLo,  iHi,
                                           H_, w_, iLoZ, iHiZ, Z_);

    bool failed = false;
    if (! isIdentical(H, H_, " H", "H_")) {
        std::cerr << "CXXLAPACK:  H = " << H << std::endl;
        std::cerr << "F77LAPACK: H_ = " << H_ << std::endl;
        failed = true;
    }

    if (! isIdentical(w, w_, " w", "w_")) {
        std::cerr << "CXXLAPACK:  w = " << w << std::endl;
        std::cerr << "F77LAPACK: w_ = " << w_ << std::endl;
        failed = true;
    }

    if (! isIdentical(Z, Z_, " Z", "Z_")) {
        std::cerr << "CXXLAPACK:  Z = " << Z << std::endl;
        std::cerr << "F77LAPACK: Z_ = " << Z_ << std::endl;
        failed = true;
    }

    if (! isIdentical(info, info_, " info", "info_")) {
        std::cerr << "CXXLAPACK:  info = " << info << std::endl;
        std::cerr << "F77LAPACK: info_ = " << info_ << std::endl;
        failed = true;
    }

    if (failed) {
        std::cerr << "H_org = " << H_org << std::endl;
        std::cerr << "Z_org = " << Z_org << std::endl;
        std::cerr << "wantT = " << wantT
                  << ", wantZ = " << wantZ
                  << ", iLo = " << iLo
                  << ", iHi = " << iHi
                  << std::endl;
        std::cerr << "error in: lahqr.tcc" << std::endl;
        ASSERT(0);
    } else {
//        std::cerr << "passed: lahqr.tcc" << std::endl;
    }
#   endif
    return info;
}

} } // namespace lapack, flens

#endif // FLENS_LAPACK_LA_LAHQR_TCC